Gaston895 committed on
Commit
018d1f9
·
verified ·
1 Parent(s): c072db0

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +153 -147
app.py CHANGED
@@ -38,139 +38,135 @@ GLOBAL_REGIONS = [
38
  # HuggingFace Token for all providers
39
  HF_TOKEN = os.getenv('HF_TOKEN', '')
40
 
41
- # HuggingFace Inference API using linked DeepSeek models
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  API_PROVIDERS = [
43
  {
44
  "name": "deepseek-v3.2-exp",
45
- "base_url": "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-V3.2-Exp",
46
- "headers": {
47
- "Authorization": f"Bearer {HF_TOKEN}",
48
- "Content-Type": "application/json"
49
- },
50
- "provider": "hf_inference",
51
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
52
  },
53
  {
54
  "name": "deepseek-v3-base",
55
- "base_url": "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-V3-Base",
56
- "headers": {
57
- "Authorization": f"Bearer {HF_TOKEN}",
58
- "Content-Type": "application/json"
59
- },
60
- "provider": "hf_inference",
61
  "model": "deepseek-ai/DeepSeek-V3-Base"
62
  },
63
  {
64
  "name": "deepseek-fallback",
65
- "base_url": "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-V3.2-Exp",
66
- "headers": {
67
- "Authorization": f"Bearer {HF_TOKEN}",
68
- "Content-Type": "application/json"
69
- },
70
- "provider": "hf_inference",
71
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
72
  }
73
  ]
74
 
75
  def get_next_provider():
76
- """Get the next available API provider for failover"""
77
  global current_provider_index
78
- provider = API_PROVIDERS[current_provider_index]
79
- current_provider_index = (current_provider_index + 1) % len(API_PROVIDERS)
80
- return provider
 
 
81
 
82
- def call_deepseek_api(messages: List[Dict], provider: Dict, max_retries: int = 3) -> Optional[str]:
83
- """Call DeepSeek API via HuggingFace Inference API"""
84
- try:
85
- # Convert messages to HF Inference API format
86
- conversation = ""
87
- for msg in messages:
88
- if msg["role"] == "system":
89
- conversation += f"System: {msg['content']}\n\n"
90
- elif msg["role"] == "user":
91
- conversation += f"User: {msg['content']}\n\n"
92
- elif msg["role"] == "assistant":
93
- conversation += f"Assistant: {msg['content']}\n\n"
94
-
95
- conversation += "Assistant: "
96
 
97
- payload = {
98
- "inputs": conversation,
99
- "parameters": {
100
- "max_new_tokens": 1024,
101
- "temperature": 0.7,
102
- "top_p": 0.9,
103
- "do_sample": True,
104
- "return_full_text": False
105
- },
106
- "options": {
107
- "wait_for_model": True,
108
- "use_cache": False
109
- }
110
- }
111
 
112
- response = requests.post(
113
- provider['base_url'],
114
- headers=provider["headers"],
115
- json=payload,
116
- timeout=60
 
 
117
  )
118
 
119
- if response.status_code == 200:
120
- result = response.json()
121
-
122
- # Handle HuggingFace Inference API response format
123
- if isinstance(result, list) and len(result) > 0:
124
- content = result[0].get("generated_text", "")
125
- logger.info(f"βœ… Success with provider: {provider['name']} ({provider['provider']})")
126
- return content.strip()
127
- elif isinstance(result, dict) and "generated_text" in result:
128
- content = result["generated_text"]
129
- logger.info(f"βœ… Success with provider: {provider['name']} ({provider['provider']})")
130
- return content.strip()
131
- else:
132
- logger.warning(f"⚠️ Unexpected response format from {provider['name']}: {result}")
133
- return None
134
-
135
- elif response.status_code == 429:
136
- logger.warning(f"πŸ’Έ Rate limit reached for {provider['name']}, switching to next provider...")
137
- return None
138
- elif response.status_code == 503:
139
- logger.warning(f"⏳ Model loading for {provider['name']}, waiting 15 seconds...")
140
- time.sleep(15) # Wait longer for model to load
141
- return None
142
  else:
143
- logger.warning(f"⚠️ API error from {provider['name']}: {response.status_code} - {response.text}")
144
  return None
145
 
146
- except requests.exceptions.Timeout:
147
- logger.warning(f"⏰ Timeout with provider: {provider['name']}")
148
- return None
149
- except requests.exceptions.RequestException as e:
150
- logger.warning(f"πŸ”Œ Connection error with {provider['name']}: {str(e)}")
151
- return None
152
  except Exception as e:
153
- logger.error(f"❌ Unexpected error with {provider['name']}: {str(e)}")
 
 
 
 
 
 
 
154
  return None
155
 
156
  def call_deepseek_with_failover(messages: List[Dict]) -> str:
157
- """Call DeepSeek-V3.2-Exp with automatic provider failover"""
158
- providers_tried = []
 
159
 
160
- # Try all providers until one succeeds
161
- for attempt in range(len(API_PROVIDERS)):
162
- provider = get_next_provider()
163
- providers_tried.append(provider['name'])
 
 
 
 
 
164
 
165
- logger.info(f"πŸ”„ Trying provider: {provider['name']} (attempt {attempt + 1}/{len(API_PROVIDERS)})")
166
 
167
- result = call_deepseek_api(messages, provider)
168
  if result:
169
  return result
170
 
171
- # If all providers failed
172
- logger.error(f"❌ All providers failed: {', '.join(providers_tried)}")
173
- return f"I apologize, but all API providers ({', '.join(providers_tried)}) are currently unavailable. Please try again in a moment."
174
 
175
  def format_response(text):
176
  """Clean and format the model response"""
@@ -302,7 +298,7 @@ Provide comprehensive analysis with specific numerical values for all calculated
302
  "year": year,
303
  "analysis_timestamp": datetime.now().isoformat(),
304
  "model": MODEL_NAME,
305
- "providers": [p["name"] for p in API_PROVIDERS]
306
  }
307
 
308
  # Extract metrics from model response
@@ -358,8 +354,8 @@ def status():
358
  'model': MODEL_NAME,
359
  'version': AEGIS_VERSION,
360
  'regions': len(GLOBAL_REGIONS),
361
- 'providers': [p["name"] for p in API_PROVIDERS],
362
- 'current_provider': API_PROVIDERS[current_provider_index]["name"],
363
  'api_ready': True
364
  })
365
 
@@ -384,24 +380,31 @@ def chat():
384
  logger.warning("Empty message provided in chat request")
385
  return jsonify({'error': 'No message provided'}), 400
386
 
387
- # Check if HF_TOKEN is available (only token needed for HuggingFace Inference API)
388
  if not HF_TOKEN or len(HF_TOKEN) < 10:
389
  logger.error("HF_TOKEN not configured or invalid!")
390
  return jsonify({
391
  'error': 'HuggingFace token not configured. Please set HF_TOKEN in Space Settings > Secrets.',
392
  'provider_status': 'HF_TOKEN missing'
393
  }), 500
 
 
 
 
 
 
 
394
 
395
  # Generate response using AEGIS Multi-Domain System with DeepSeek-V3.2-Exp
396
  logger.info("Generating AEGIS analysis...")
397
  response = analyze_with_aegis_conductor(message, analysis_type)
398
 
399
- if not response or response.startswith("I apologize, but all API providers"):
400
- logger.error("All API providers failed or returned empty response")
401
  return jsonify({
402
- 'error': 'All API providers are currently unavailable. Please check your API keys and try again.',
403
  'response': response,
404
- 'provider_status': 'All providers failed'
405
  }), 503
406
 
407
  logger.info(f"Successfully generated response of length: {len(response)}")
@@ -411,9 +414,10 @@ def chat():
411
  'timestamp': time.time(),
412
  'model': f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR (DeepSeek-V3.2-Exp)",
413
  'analysis_type': analysis_type,
414
- 'provider': f"{API_PROVIDERS[current_provider_index]['name']} ({API_PROVIDERS[current_provider_index]['provider']})",
415
- 'hf_inference': True,
416
- 'hf_token_configured': bool(HF_TOKEN and len(HF_TOKEN) > 10)
 
417
  })
418
 
419
  except Exception as e:
@@ -475,23 +479,23 @@ def diagnostic():
475
  </div>
476
 
477
  <div class="status good">
478
- <strong>Note:</strong> Only HF_TOKEN is required - all providers use HuggingFace Inference API
479
  </div>
480
 
481
  <div class="status good">
482
  <strong>Model:</strong> {MODEL_NAME}
483
  </div>
484
 
485
- <div class="status good">
486
- <strong>Providers:</strong> {len(API_PROVIDERS)} configured
487
  </div>
488
 
489
  <div class="status good">
490
- <strong>Current Provider:</strong> {API_PROVIDERS[current_provider_index]['name']} ({API_PROVIDERS[current_provider_index]['provider']})
491
  </div>
492
 
493
  <h2>πŸ”§ Configuration Instructions</h2>
494
- <p>Only HuggingFace Token is required (all providers use HF Inference API):</p>
495
  <ol>
496
  <li>Go to your space settings</li>
497
  <li>Click "Variables and secrets"</li>
@@ -511,75 +515,77 @@ def clear_chat():
511
 
512
  @app.route('/provider_status', methods=['GET'])
513
  def provider_status():
514
- """Get status of all API providers with key availability"""
515
  provider_statuses = []
516
 
517
- for i, provider in enumerate(API_PROVIDERS):
518
- # Check if API key is available for this provider
519
- # Since all providers use HuggingFace Inference API, only HF_TOKEN is needed
520
- has_api_key = bool(HF_TOKEN and len(HF_TOKEN) > 10)
521
-
522
  status_info = {
523
- "name": provider["name"],
524
- "provider_type": provider["provider"],
525
  "active": i == current_provider_index,
526
- "base_url": provider["base_url"],
527
- "model": provider.get("model", MODEL_NAME),
528
- "has_api_key": has_api_key,
529
- "key_status": "βœ… Configured" if has_api_key else "❌ Missing"
530
  }
531
  provider_statuses.append(status_info)
532
 
533
  # Count available providers
534
- available_providers = sum(1 for p in provider_statuses if p["has_api_key"])
535
 
536
  return jsonify({
537
  "providers": provider_statuses,
538
- "current_provider": API_PROVIDERS[current_provider_index]["name"],
539
- "current_provider_type": API_PROVIDERS[current_provider_index]["provider"],
540
- "total_providers": len(API_PROVIDERS),
541
  "available_providers": available_providers,
542
  "model": MODEL_NAME,
543
  "api_keys_status": {
544
  "hf_token": bool(HF_TOKEN and len(HF_TOKEN) > 10),
545
- "note": "Only HF_TOKEN required - all providers use HuggingFace Inference API"
546
  }
547
  })
548
 
549
  @app.route('/switch_provider', methods=['POST'])
550
  def switch_provider():
551
- """Manually switch to next provider"""
552
  global current_provider_index
553
- old_provider = API_PROVIDERS[current_provider_index]["name"]
554
- old_provider_type = API_PROVIDERS[current_provider_index]["provider"]
555
- current_provider_index = (current_provider_index + 1) % len(API_PROVIDERS)
556
- new_provider = API_PROVIDERS[current_provider_index]["name"]
557
- new_provider_type = API_PROVIDERS[current_provider_index]["provider"]
 
 
 
 
 
558
 
559
  return jsonify({
560
- "switched_from": f"{old_provider} ({old_provider_type})",
561
- "switched_to": f"{new_provider} ({new_provider_type})",
562
- "message": f"Switched from {old_provider} to {new_provider} provider",
563
  "model": MODEL_NAME
564
  })
565
 
566
  # Initialize system
567
  def initialize_system():
568
- """Initialize AEGIS system with DeepSeek-V3.2-Exp via HuggingFace Inference"""
569
  global loading_status
570
 
571
- print("πŸš€ AEGIS BIO LAB 10 CONDUCTOR initializing with DeepSeek-V3.2-Exp via HuggingFace...")
572
  print(f"πŸ€— Model: {MODEL_NAME}")
573
 
574
- # Fix the f-string syntax error
575
- provider_list = ', '.join([f"{p['name']} ({p['provider']})" for p in API_PROVIDERS])
576
- print(f"πŸ“‘ Available providers: {provider_list}")
 
 
 
577
 
578
- print(f"πŸ”„ Automatic failover enabled across {len(API_PROVIDERS)} providers")
579
  print(f"🌍 Global analysis across {len(GLOBAL_REGIONS)} regions")
580
  print(f"πŸ”‘ Using HuggingFace Token: {'βœ… Valid' if HF_TOKEN and len(HF_TOKEN) > 10 else '❌ Missing'}")
581
 
582
- loading_status = f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR ready with DeepSeek-V3.2-Exp via HuggingFace Inference"
583
  print("βœ… AEGIS BIO LAB 10 CONDUCTOR ready!")
584
 
585
  if __name__ == '__main__':
 
38
  # HuggingFace Token for all providers
39
  HF_TOKEN = os.getenv('HF_TOKEN', '')
40
 
41
+ # Initialize InferenceClient instances for DeepSeek models
42
+ inference_clients = []
43
+ if HF_TOKEN:
44
+ try:
45
+ # Primary DeepSeek-V3.2-Exp client
46
+ primary_client = InferenceClient(
47
+ model="deepseek-ai/DeepSeek-V3.2-Exp",
48
+ token=HF_TOKEN
49
+ )
50
+ inference_clients.append({
51
+ "name": "deepseek-v3.2-exp",
52
+ "client": primary_client,
53
+ "model": "deepseek-ai/DeepSeek-V3.2-Exp"
54
+ })
55
+
56
+ # Secondary DeepSeek-V3-Base client
57
+ secondary_client = InferenceClient(
58
+ model="deepseek-ai/DeepSeek-V3-Base",
59
+ token=HF_TOKEN
60
+ )
61
+ inference_clients.append({
62
+ "name": "deepseek-v3-base",
63
+ "client": secondary_client,
64
+ "model": "deepseek-ai/DeepSeek-V3-Base"
65
+ })
66
+
67
+ # Fallback client (same as primary)
68
+ fallback_client = InferenceClient(
69
+ model="deepseek-ai/DeepSeek-V3.2-Exp",
70
+ token=HF_TOKEN
71
+ )
72
+ inference_clients.append({
73
+ "name": "deepseek-fallback",
74
+ "client": fallback_client,
75
+ "model": "deepseek-ai/DeepSeek-V3.2-Exp"
76
+ })
77
+
78
+ except Exception as e:
79
+ logger.error(f"Failed to initialize InferenceClient: {e}")
80
+
81
+ # Legacy API_PROVIDERS for compatibility (now using InferenceClient)
82
  API_PROVIDERS = [
83
  {
84
  "name": "deepseek-v3.2-exp",
85
+ "provider": "hf_inference_client",
 
 
 
 
 
86
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
87
  },
88
  {
89
  "name": "deepseek-v3-base",
90
+ "provider": "hf_inference_client",
 
 
 
 
 
91
  "model": "deepseek-ai/DeepSeek-V3-Base"
92
  },
93
  {
94
  "name": "deepseek-fallback",
95
+ "provider": "hf_inference_client",
 
 
 
 
 
96
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
97
  }
98
  ]
99
 
100
  def get_next_provider():
101
+ """Get the next available InferenceClient for failover"""
102
  global current_provider_index
103
+ if not inference_clients:
104
+ return None
105
+ client_info = inference_clients[current_provider_index]
106
+ current_provider_index = (current_provider_index + 1) % len(inference_clients)
107
+ return client_info
108
 
109
+ def call_deepseek_api(messages: List[Dict], client_info: Dict, max_retries: int = 3) -> Optional[str]:
110
+ """Call DeepSeek API via HuggingFace InferenceClient"""
111
+ if not client_info:
112
+ return None
 
 
 
 
 
 
 
 
 
 
113
 
114
+ try:
115
+ client = client_info["client"]
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
+ # Use InferenceClient.chat_completion method
118
+ response = client.chat_completion(
119
+ messages=messages,
120
+ max_tokens=1024,
121
+ temperature=0.7,
122
+ top_p=0.9,
123
+ stream=False
124
  )
125
 
126
+ # Extract content from response
127
+ if hasattr(response, 'choices') and len(response.choices) > 0:
128
+ content = response.choices[0].message.content
129
+ logger.info(f"βœ… Success with InferenceClient: {client_info['name']} ({client_info['model']})")
130
+ return content.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  else:
132
+ logger.warning(f"⚠️ Unexpected response format from {client_info['name']}: {response}")
133
  return None
134
 
 
 
 
 
 
 
135
  except Exception as e:
136
+ error_msg = str(e).lower()
137
+ if "rate limit" in error_msg or "429" in error_msg:
138
+ logger.warning(f"πŸ’Έ Rate limit reached for {client_info['name']}, switching to next provider...")
139
+ elif "503" in error_msg or "service unavailable" in error_msg:
140
+ logger.warning(f"⏳ Model loading for {client_info['name']}, waiting...")
141
+ time.sleep(10) # Wait for model to load
142
+ else:
143
+ logger.warning(f"⚠️ API error from {client_info['name']}: {str(e)}")
144
  return None
145
 
146
  def call_deepseek_with_failover(messages: List[Dict]) -> str:
147
+ """Call DeepSeek-V3.2-Exp with automatic InferenceClient failover"""
148
+ if not inference_clients:
149
+ return "InferenceClient not initialized. Please check HF_TOKEN configuration."
150
 
151
+ clients_tried = []
152
+
153
+ # Try all clients until one succeeds
154
+ for attempt in range(len(inference_clients)):
155
+ client_info = get_next_provider()
156
+ if not client_info:
157
+ continue
158
+
159
+ clients_tried.append(client_info['name'])
160
 
161
+ logger.info(f"πŸ”„ Trying InferenceClient: {client_info['name']} (attempt {attempt + 1}/{len(inference_clients)})")
162
 
163
+ result = call_deepseek_api(messages, client_info)
164
  if result:
165
  return result
166
 
167
+ # If all clients failed
168
+ logger.error(f"❌ All InferenceClients failed: {', '.join(clients_tried)}")
169
+ return f"I apologize, but all API providers ({', '.join(clients_tried)}) are currently unavailable. Please try again in a moment."
170
 
171
  def format_response(text):
172
  """Clean and format the model response"""
 
298
  "year": year,
299
  "analysis_timestamp": datetime.now().isoformat(),
300
  "model": MODEL_NAME,
301
+ "providers": [c["name"] for c in inference_clients]
302
  }
303
 
304
  # Extract metrics from model response
 
354
  'model': MODEL_NAME,
355
  'version': AEGIS_VERSION,
356
  'regions': len(GLOBAL_REGIONS),
357
+ 'providers': [c["name"] for c in inference_clients],
358
+ 'current_provider': inference_clients[current_provider_index]["name"] if inference_clients else "none",
359
  'api_ready': True
360
  })
361
 
 
380
  logger.warning("Empty message provided in chat request")
381
  return jsonify({'error': 'No message provided'}), 400
382
 
383
+ # Check if HF_TOKEN is available and InferenceClients are initialized
384
  if not HF_TOKEN or len(HF_TOKEN) < 10:
385
  logger.error("HF_TOKEN not configured or invalid!")
386
  return jsonify({
387
  'error': 'HuggingFace token not configured. Please set HF_TOKEN in Space Settings > Secrets.',
388
  'provider_status': 'HF_TOKEN missing'
389
  }), 500
390
+
391
+ if not inference_clients:
392
+ logger.error("InferenceClients not initialized!")
393
+ return jsonify({
394
+ 'error': 'InferenceClients not initialized. Please check HF_TOKEN configuration.',
395
+ 'provider_status': 'InferenceClients not initialized'
396
+ }), 500
397
 
398
  # Generate response using AEGIS Multi-Domain System with DeepSeek-V3.2-Exp
399
  logger.info("Generating AEGIS analysis...")
400
  response = analyze_with_aegis_conductor(message, analysis_type)
401
 
402
+ if not response or response.startswith("I apologize, but all API providers") or response.startswith("InferenceClient not initialized"):
403
+ logger.error("All InferenceClients failed or returned empty response")
404
  return jsonify({
405
+ 'error': 'All API providers are currently unavailable. Please check your HF_TOKEN and try again.',
406
  'response': response,
407
+ 'provider_status': 'All InferenceClients failed'
408
  }), 503
409
 
410
  logger.info(f"Successfully generated response of length: {len(response)}")
 
414
  'timestamp': time.time(),
415
  'model': f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR (DeepSeek-V3.2-Exp)",
416
  'analysis_type': analysis_type,
417
+ 'provider': f"{inference_clients[current_provider_index]['name'] if inference_clients else 'none'} (InferenceClient)",
418
+ 'hf_inference_client': True,
419
+ 'hf_token_configured': bool(HF_TOKEN and len(HF_TOKEN) > 10),
420
+ 'clients_initialized': len(inference_clients)
421
  })
422
 
423
  except Exception as e:
 
479
  </div>
480
 
481
  <div class="status good">
482
+ <strong>Note:</strong> Using HuggingFace InferenceClient - only HF_TOKEN required
483
  </div>
484
 
485
  <div class="status good">
486
  <strong>Model:</strong> {MODEL_NAME}
487
  </div>
488
 
489
+ <div class="status {'good' if inference_clients else 'bad'}">
490
+ <strong>InferenceClients:</strong> {len(inference_clients)} initialized
491
  </div>
492
 
493
  <div class="status good">
494
+ <strong>Current Client:</strong> {inference_clients[current_provider_index]["name"] if inference_clients else "none"}
495
  </div>
496
 
497
  <h2>πŸ”§ Configuration Instructions</h2>
498
+ <p>Using HuggingFace InferenceClient (only HF_TOKEN required):</p>
499
  <ol>
500
  <li>Go to your space settings</li>
501
  <li>Click "Variables and secrets"</li>
 
515
 
516
  @app.route('/provider_status', methods=['GET'])
517
  def provider_status():
518
+ """Get status of all InferenceClient providers"""
519
  provider_statuses = []
520
 
521
+ for i, client_info in enumerate(inference_clients):
 
 
 
 
522
  status_info = {
523
+ "name": client_info["name"],
524
+ "provider_type": "hf_inference_client",
525
  "active": i == current_provider_index,
526
+ "model": client_info.get("model", MODEL_NAME),
527
+ "has_api_key": bool(HF_TOKEN and len(HF_TOKEN) > 10),
528
+ "key_status": "βœ… Configured" if HF_TOKEN and len(HF_TOKEN) > 10 else "❌ Missing"
 
529
  }
530
  provider_statuses.append(status_info)
531
 
532
  # Count available providers
533
+ available_providers = len(inference_clients) if HF_TOKEN and len(HF_TOKEN) > 10 else 0
534
 
535
  return jsonify({
536
  "providers": provider_statuses,
537
+ "current_provider": inference_clients[current_provider_index]["name"] if inference_clients else "none",
538
+ "current_provider_type": "hf_inference_client",
539
+ "total_providers": len(inference_clients),
540
  "available_providers": available_providers,
541
  "model": MODEL_NAME,
542
  "api_keys_status": {
543
  "hf_token": bool(HF_TOKEN and len(HF_TOKEN) > 10),
544
+ "note": "Using HuggingFace InferenceClient - only HF_TOKEN required"
545
  }
546
  })
547
 
548
  @app.route('/switch_provider', methods=['POST'])
549
  def switch_provider():
550
+ """Manually switch to next InferenceClient provider"""
551
  global current_provider_index
552
+
553
+ if not inference_clients:
554
+ return jsonify({
555
+ "error": "No InferenceClients available",
556
+ "message": "Please check HF_TOKEN configuration"
557
+ }), 500
558
+
559
+ old_client = inference_clients[current_provider_index]["name"]
560
+ current_provider_index = (current_provider_index + 1) % len(inference_clients)
561
+ new_client = inference_clients[current_provider_index]["name"]
562
 
563
  return jsonify({
564
+ "switched_from": f"{old_client} (InferenceClient)",
565
+ "switched_to": f"{new_client} (InferenceClient)",
566
+ "message": f"Switched from {old_client} to {new_client} InferenceClient",
567
  "model": MODEL_NAME
568
  })
569
 
570
  # Initialize system
571
  def initialize_system():
572
+ """Initialize AEGIS system with DeepSeek-V3.2-Exp via HuggingFace InferenceClient"""
573
  global loading_status
574
 
575
+ print("πŸš€ AEGIS BIO LAB 10 CONDUCTOR initializing with DeepSeek-V3.2-Exp via HuggingFace InferenceClient...")
576
  print(f"πŸ€— Model: {MODEL_NAME}")
577
 
578
+ if inference_clients:
579
+ client_list = ', '.join([f"{c['name']} ({c['model']})" for c in inference_clients])
580
+ print(f"πŸ“‘ Available InferenceClients: {client_list}")
581
+ print(f"πŸ”„ Automatic failover enabled across {len(inference_clients)} InferenceClients")
582
+ else:
583
+ print("❌ No InferenceClients initialized - check HF_TOKEN")
584
 
 
585
  print(f"🌍 Global analysis across {len(GLOBAL_REGIONS)} regions")
586
  print(f"πŸ”‘ Using HuggingFace Token: {'βœ… Valid' if HF_TOKEN and len(HF_TOKEN) > 10 else '❌ Missing'}")
587
 
588
+ loading_status = f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR ready with DeepSeek-V3.2-Exp via HuggingFace InferenceClient"
589
  print("βœ… AEGIS BIO LAB 10 CONDUCTOR ready!")
590
 
591
  if __name__ == '__main__':