Gaston895 committed on
Commit
e4d4a74
·
verified ·
1 Parent(s): d0283fa

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +121 -112
app.py CHANGED
@@ -11,7 +11,6 @@ import logging
11
  import os
12
  from dotenv import load_dotenv
13
  import random
14
- from openai import OpenAI
15
 
16
  # Load environment variables
17
  load_dotenv()
@@ -38,111 +37,121 @@ GLOBAL_REGIONS = [
38
  # HuggingFace Token for all providers
39
  HF_TOKEN = os.getenv('HF_TOKEN', '')
40
 
41
- # Initialize OpenAI-compatible clients for DeepSeek models using HuggingFace router
42
- openai_clients = []
43
  if HF_TOKEN:
44
- try:
45
- # Primary DeepSeek-V3.2-Exp client
46
- primary_client = OpenAI(
47
- api_key=HF_TOKEN,
48
- base_url="https://router.huggingface.co/v1"
49
- )
50
- openai_clients.append({
 
 
 
51
  "name": "deepseek-v3.2-exp",
52
- "client": primary_client,
 
53
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
54
- })
55
-
56
- # Secondary DeepSeek-V3-Base client
57
- secondary_client = OpenAI(
58
- api_key=HF_TOKEN,
59
- base_url="https://router.huggingface.co/v1"
60
- )
61
- openai_clients.append({
62
  "name": "deepseek-v3-base",
63
- "client": secondary_client,
 
64
  "model": "deepseek-ai/DeepSeek-V3-Base"
65
- })
66
-
67
- # Fallback client (same as primary)
68
- fallback_client = OpenAI(
69
- api_key=HF_TOKEN,
70
- base_url="https://router.huggingface.co/v1"
71
- )
72
- openai_clients.append({
73
  "name": "deepseek-fallback",
74
- "client": fallback_client,
 
75
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
76
- })
77
-
78
- except Exception as e:
79
- logger.error(f"Failed to initialize OpenAI clients: {e}")
80
 
81
- # Legacy API_PROVIDERS for compatibility (now using OpenAI client)
82
  API_PROVIDERS = [
83
  {
84
  "name": "deepseek-v3.2-exp",
85
- "provider": "hf_router_openai",
86
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
87
  },
88
  {
89
  "name": "deepseek-v3-base",
90
- "provider": "hf_router_openai",
91
  "model": "deepseek-ai/DeepSeek-V3-Base"
92
  },
93
  {
94
  "name": "deepseek-fallback",
95
- "provider": "hf_router_openai",
96
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
97
  }
98
  ]
99
 
100
  def get_next_provider():
101
- """Get the next available OpenAI client for failover"""
102
  global current_provider_index
103
- if not openai_clients:
104
  return None
105
- client_info = openai_clients[current_provider_index]
106
- current_provider_index = (current_provider_index + 1) % len(openai_clients)
107
  return client_info
108
 
109
  def call_deepseek_api(messages: List[Dict], client_info: Dict, max_retries: int = 3) -> Optional[str]:
110
- """Call DeepSeek API via HuggingFace Router using OpenAI client"""
111
  if not client_info:
112
  return None
113
 
114
  try:
115
- client = client_info["client"]
116
- model = client_info["model"]
 
 
 
 
 
 
 
117
 
118
- # Use OpenAI client with HuggingFace router
119
- response = client.chat.completions.create(
120
- model=model,
121
- messages=messages,
122
- max_tokens=1024,
123
- temperature=0.7,
124
- top_p=0.9,
125
- stream=False
126
  )
127
 
128
- # Extract content from response
129
- if response.choices and len(response.choices) > 0:
130
- content = response.choices[0].message.content
131
- logger.info(f"βœ… Success with OpenAI client: {client_info['name']} ({client_info['model']})")
132
- return content.strip()
133
- else:
134
- logger.warning(f"⚠️ Unexpected response format from {client_info['name']}: {response}")
135
- return None
136
 
137
- except Exception as e:
138
- error_msg = str(e).lower()
139
- if "rate limit" in error_msg or "429" in error_msg:
 
 
 
 
 
 
 
140
  logger.warning(f"πŸ’Έ Rate limit reached for {client_info['name']}, switching to next provider...")
141
- elif "503" in error_msg or "service unavailable" in error_msg:
 
142
  logger.warning(f"⏳ Model loading for {client_info['name']}, waiting...")
143
- time.sleep(10) # Wait for model to load
 
144
  else:
145
- logger.warning(f"⚠️ API error from {client_info['name']}: {str(e)}")
 
 
 
 
 
 
 
 
 
 
146
  return None
147
  if "rate limit" in error_msg or "429" in error_msg:
148
  logger.warning(f"πŸ’Έ Rate limit reached for {client_info['name']}, switching to next provider...")
@@ -154,28 +163,28 @@ def call_deepseek_api(messages: List[Dict], client_info: Dict, max_retries: int
154
  return None
155
 
156
  def call_deepseek_with_failover(messages: List[Dict]) -> str:
157
- """Call DeepSeek-V3.2-Exp with automatic OpenAI client failover"""
158
- if not openai_clients:
159
- return "OpenAI clients not initialized. Please check HF_TOKEN configuration."
160
 
161
  clients_tried = []
162
 
163
  # Try all clients until one succeeds
164
- for attempt in range(len(openai_clients)):
165
  client_info = get_next_provider()
166
  if not client_info:
167
  continue
168
 
169
  clients_tried.append(client_info['name'])
170
 
171
- logger.info(f"πŸ”„ Trying OpenAI client: {client_info['name']} (attempt {attempt + 1}/{len(openai_clients)})")
172
 
173
  result = call_deepseek_api(messages, client_info)
174
  if result:
175
  return result
176
 
177
  # If all clients failed
178
- logger.error(f"❌ All OpenAI clients failed: {', '.join(clients_tried)}")
179
  return f"I apologize, but all API providers ({', '.join(clients_tried)}) are currently unavailable. Please try again in a moment."
180
 
181
  def format_response(text):
@@ -308,7 +317,7 @@ Provide comprehensive analysis with specific numerical values for all calculated
308
  "year": year,
309
  "analysis_timestamp": datetime.now().isoformat(),
310
  "model": MODEL_NAME,
311
- "providers": [c["name"] for c in openai_clients]
312
  }
313
 
314
  # Extract metrics from model response
@@ -364,8 +373,8 @@ def status():
364
  'model': MODEL_NAME,
365
  'version': AEGIS_VERSION,
366
  'regions': len(GLOBAL_REGIONS),
367
- 'providers': [c["name"] for c in openai_clients],
368
- 'current_provider': openai_clients[current_provider_index]["name"] if openai_clients else "none",
369
  'api_ready': True
370
  })
371
 
@@ -398,23 +407,23 @@ def chat():
398
  'provider_status': 'HF_TOKEN missing'
399
  }), 500
400
 
401
- if not openai_clients:
402
- logger.error("OpenAI clients not initialized!")
403
  return jsonify({
404
- 'error': 'OpenAI clients not initialized. Please check HF_TOKEN configuration.',
405
- 'provider_status': 'OpenAI clients not initialized'
406
  }), 500
407
 
408
  # Generate response using AEGIS Multi-Domain System with DeepSeek-V3.2-Exp
409
  logger.info("Generating AEGIS analysis...")
410
  response = analyze_with_aegis_conductor(message, analysis_type)
411
 
412
- if not response or response.startswith("I apologize, but all API providers") or response.startswith("OpenAI clients not initialized"):
413
- logger.error("All OpenAI clients failed or returned empty response")
414
  return jsonify({
415
  'error': 'All API providers are currently unavailable. Please check your HF_TOKEN and try again.',
416
  'response': response,
417
- 'provider_status': 'All OpenAI clients failed'
418
  }), 503
419
 
420
  logger.info(f"Successfully generated response of length: {len(response)}")
@@ -424,10 +433,10 @@ def chat():
424
  'timestamp': time.time(),
425
  'model': f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR (DeepSeek-V3.2-Exp)",
426
  'analysis_type': analysis_type,
427
- 'provider': f"{openai_clients[current_provider_index]['name'] if openai_clients else 'none'} (OpenAI)",
428
- 'hf_router_openai': True,
429
  'hf_token_configured': bool(HF_TOKEN and len(HF_TOKEN) > 10),
430
- 'clients_initialized': len(openai_clients)
431
  })
432
 
433
  except Exception as e:
@@ -496,16 +505,16 @@ def diagnostic():
496
  <strong>Model:</strong> {MODEL_NAME}
497
  </div>
498
 
499
- <div class="status {'good' if openai_clients else 'bad'}">
500
- <strong>OpenAI Clients:</strong> {len(openai_clients)} initialized
501
  </div>
502
 
503
  <div class="status good">
504
- <strong>Current Client:</strong> {openai_clients[current_provider_index]["name"] if openai_clients else "none"}
505
  </div>
506
 
507
  <h2>πŸ”§ Configuration Instructions</h2>
508
- <p>Using HuggingFace Router with OpenAI client (only HF_TOKEN required):</p>
509
  <ol>
510
  <li>Go to your space settings</li>
511
  <li>Click "Variables and secrets"</li>
@@ -528,10 +537,10 @@ def provider_status():
528
  """Get status of all InferenceClient providers"""
529
  provider_statuses = []
530
 
531
- for i, client_info in enumerate(openai_clients):
532
  status_info = {
533
  "name": client_info["name"],
534
- "provider_type": "hf_router_openai",
535
  "active": i == current_provider_index,
536
  "model": client_info.get("model", MODEL_NAME),
537
  "has_api_key": bool(HF_TOKEN and len(HF_TOKEN) > 10),
@@ -540,40 +549,40 @@ def provider_status():
540
  provider_statuses.append(status_info)
541
 
542
  # Count available providers
543
- available_providers = len(openai_clients) if HF_TOKEN and len(HF_TOKEN) > 10 else 0
544
 
545
  return jsonify({
546
  "providers": provider_statuses,
547
- "current_provider": openai_clients[current_provider_index]["name"] if openai_clients else "none",
548
- "current_provider_type": "hf_router_openai",
549
- "total_providers": len(openai_clients),
550
  "available_providers": available_providers,
551
  "model": MODEL_NAME,
552
  "api_keys_status": {
553
  "hf_token": bool(HF_TOKEN and len(HF_TOKEN) > 10),
554
- "note": "Using HuggingFace Router with OpenAI client - only HF_TOKEN required"
555
  }
556
  })
557
 
558
  @app.route('/switch_provider', methods=['POST'])
559
  def switch_provider():
560
- """Manually switch to next OpenAI client provider"""
561
  global current_provider_index
562
 
563
- if not openai_clients:
564
  return jsonify({
565
- "error": "No OpenAI clients available",
566
  "message": "Please check HF_TOKEN configuration"
567
  }), 500
568
 
569
- old_client = openai_clients[current_provider_index]["name"]
570
- current_provider_index = (current_provider_index + 1) % len(openai_clients)
571
- new_client = openai_clients[current_provider_index]["name"]
572
 
573
  return jsonify({
574
- "switched_from": f"{old_client} (OpenAI)",
575
- "switched_to": f"{new_client} (OpenAI)",
576
- "message": f"Switched from {old_client} to {new_client} OpenAI client",
577
  "model": MODEL_NAME
578
  })
579
 
@@ -584,19 +593,19 @@ def initialize_system():
584
 
585
  print("πŸš€ AEGIS BIO LAB 10 CONDUCTOR initializing with DeepSeek-V3.2-Exp via HuggingFace Router...")
586
  print(f"πŸ€— Model: {MODEL_NAME}")
587
- print(f"πŸ”— Endpoint: https://router.huggingface.co/v1")
588
 
589
- if openai_clients:
590
- client_list = ', '.join([f"{c['name']} ({c['model']})" for c in openai_clients])
591
- print(f"πŸ“‘ Available OpenAI clients: {client_list}")
592
- print(f"πŸ”„ Automatic failover enabled across {len(openai_clients)} OpenAI clients")
593
  else:
594
- print("❌ No OpenAI clients initialized - check HF_TOKEN")
595
 
596
  print(f"🌍 Global analysis across {len(GLOBAL_REGIONS)} regions")
597
  print(f"πŸ”‘ Using HuggingFace Token: {'βœ… Valid' if HF_TOKEN and len(HF_TOKEN) > 10 else '❌ Missing'}")
598
 
599
- loading_status = f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR ready with DeepSeek-V3.2-Exp via HuggingFace Router"
600
  print("βœ… AEGIS BIO LAB 10 CONDUCTOR ready!")
601
 
602
  if __name__ == '__main__':
 
11
  import os
12
  from dotenv import load_dotenv
13
  import random
 
14
 
15
  # Load environment variables
16
  load_dotenv()
 
37
  # HuggingFace Token for all providers
38
  HF_TOKEN = os.getenv('HF_TOKEN', '')
39
 
40
+ # Initialize HTTP clients for DeepSeek models using HuggingFace router
41
+ http_clients = []
42
  if HF_TOKEN:
43
+ # HuggingFace router endpoint
44
+ router_url = "https://router.huggingface.co/v1/chat/completions"
45
+ headers = {
46
+ "Authorization": f"Bearer {HF_TOKEN}",
47
+ "Content-Type": "application/json"
48
+ }
49
+
50
+ # Create client configurations for different models
51
+ http_clients = [
52
+ {
53
  "name": "deepseek-v3.2-exp",
54
+ "url": router_url,
55
+ "headers": headers,
56
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
57
+ },
58
+ {
 
 
 
 
 
 
59
  "name": "deepseek-v3-base",
60
+ "url": router_url,
61
+ "headers": headers,
62
  "model": "deepseek-ai/DeepSeek-V3-Base"
63
+ },
64
+ {
 
 
 
 
 
 
65
  "name": "deepseek-fallback",
66
+ "url": router_url,
67
+ "headers": headers,
68
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
69
+ }
70
+ ]
 
 
71
 
72
+ # Legacy API_PROVIDERS for compatibility (now using HTTP requests)
73
  API_PROVIDERS = [
74
  {
75
  "name": "deepseek-v3.2-exp",
76
+ "provider": "hf_router_http",
77
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
78
  },
79
  {
80
  "name": "deepseek-v3-base",
81
+ "provider": "hf_router_http",
82
  "model": "deepseek-ai/DeepSeek-V3-Base"
83
  },
84
  {
85
  "name": "deepseek-fallback",
86
+ "provider": "hf_router_http",
87
  "model": "deepseek-ai/DeepSeek-V3.2-Exp"
88
  }
89
  ]
90
 
91
  def get_next_provider():
92
+ """Get the next available HTTP client for failover"""
93
  global current_provider_index
94
+ if not http_clients:
95
  return None
96
+ client_info = http_clients[current_provider_index]
97
+ current_provider_index = (current_provider_index + 1) % len(http_clients)
98
  return client_info
99
 
100
  def call_deepseek_api(messages: List[Dict], client_info: Dict, max_retries: int = 3) -> Optional[str]:
101
+ """Call DeepSeek API via HuggingFace Router using HTTP requests"""
102
  if not client_info:
103
  return None
104
 
105
  try:
106
+ # Prepare OpenAI-compatible payload
107
+ payload = {
108
+ "model": client_info["model"],
109
+ "messages": messages,
110
+ "max_tokens": 1024,
111
+ "temperature": 0.7,
112
+ "top_p": 0.9,
113
+ "stream": False
114
+ }
115
 
116
+ # Make HTTP request to HuggingFace router
117
+ response = requests.post(
118
+ client_info["url"],
119
+ headers=client_info["headers"],
120
+ json=payload,
121
+ timeout=60
 
 
122
  )
123
 
124
+ if response.status_code == 200:
125
+ result = response.json()
 
 
 
 
 
 
126
 
127
+ # Extract content from OpenAI-compatible response
128
+ if "choices" in result and len(result["choices"]) > 0:
129
+ content = result["choices"][0]["message"]["content"]
130
+ logger.info(f"βœ… Success with HTTP client: {client_info['name']} ({client_info['model']})")
131
+ return content.strip()
132
+ else:
133
+ logger.warning(f"⚠️ Unexpected response format from {client_info['name']}: {result}")
134
+ return None
135
+
136
+ elif response.status_code == 429:
137
  logger.warning(f"πŸ’Έ Rate limit reached for {client_info['name']}, switching to next provider...")
138
+ return None
139
+ elif response.status_code == 503:
140
  logger.warning(f"⏳ Model loading for {client_info['name']}, waiting...")
141
+ time.sleep(10)
142
+ return None
143
  else:
144
+ logger.warning(f"⚠️ API error from {client_info['name']}: {response.status_code} - {response.text}")
145
+ return None
146
+
147
+ except requests.exceptions.Timeout:
148
+ logger.warning(f"⏰ Timeout with {client_info['name']}")
149
+ return None
150
+ except requests.exceptions.RequestException as e:
151
+ logger.warning(f"πŸ”Œ Connection error with {client_info['name']}: {str(e)}")
152
+ return None
153
+ except Exception as e:
154
+ logger.warning(f"⚠️ Unexpected error with {client_info['name']}: {str(e)}")
155
  return None
156
  if "rate limit" in error_msg or "429" in error_msg:
157
  logger.warning(f"πŸ’Έ Rate limit reached for {client_info['name']}, switching to next provider...")
 
163
  return None
164
 
165
  def call_deepseek_with_failover(messages: List[Dict]) -> str:
166
+ """Call DeepSeek-V3.2-Exp with automatic HTTP client failover"""
167
+ if not http_clients:
168
+ return "HTTP clients not initialized. Please check HF_TOKEN configuration."
169
 
170
  clients_tried = []
171
 
172
  # Try all clients until one succeeds
173
+ for attempt in range(len(http_clients)):
174
  client_info = get_next_provider()
175
  if not client_info:
176
  continue
177
 
178
  clients_tried.append(client_info['name'])
179
 
180
+ logger.info(f"πŸ”„ Trying HTTP client: {client_info['name']} (attempt {attempt + 1}/{len(http_clients)})")
181
 
182
  result = call_deepseek_api(messages, client_info)
183
  if result:
184
  return result
185
 
186
  # If all clients failed
187
+ logger.error(f"❌ All HTTP clients failed: {', '.join(clients_tried)}")
188
  return f"I apologize, but all API providers ({', '.join(clients_tried)}) are currently unavailable. Please try again in a moment."
189
 
190
  def format_response(text):
 
317
  "year": year,
318
  "analysis_timestamp": datetime.now().isoformat(),
319
  "model": MODEL_NAME,
320
+ "providers": [c["name"] for c in http_clients]
321
  }
322
 
323
  # Extract metrics from model response
 
373
  'model': MODEL_NAME,
374
  'version': AEGIS_VERSION,
375
  'regions': len(GLOBAL_REGIONS),
376
+ 'providers': [c["name"] for c in http_clients],
377
+ 'current_provider': http_clients[current_provider_index]["name"] if http_clients else "none",
378
  'api_ready': True
379
  })
380
 
 
407
  'provider_status': 'HF_TOKEN missing'
408
  }), 500
409
 
410
+ if not http_clients:
411
+ logger.error("HTTP clients not initialized!")
412
  return jsonify({
413
+ 'error': 'HTTP clients not initialized. Please check HF_TOKEN configuration.',
414
+ 'provider_status': 'HTTP clients not initialized'
415
  }), 500
416
 
417
  # Generate response using AEGIS Multi-Domain System with DeepSeek-V3.2-Exp
418
  logger.info("Generating AEGIS analysis...")
419
  response = analyze_with_aegis_conductor(message, analysis_type)
420
 
421
+ if not response or response.startswith("I apologize, but all API providers") or response.startswith("HTTP clients not initialized"):
422
+ logger.error("All HTTP clients failed or returned empty response")
423
  return jsonify({
424
  'error': 'All API providers are currently unavailable. Please check your HF_TOKEN and try again.',
425
  'response': response,
426
+ 'provider_status': 'All HTTP clients failed'
427
  }), 503
428
 
429
  logger.info(f"Successfully generated response of length: {len(response)}")
 
433
  'timestamp': time.time(),
434
  'model': f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR (DeepSeek-V3.2-Exp)",
435
  'analysis_type': analysis_type,
436
+ 'provider': f"{http_clients[current_provider_index]['name'] if http_clients else 'none'} (HTTP)",
437
+ 'hf_router_http': True,
438
  'hf_token_configured': bool(HF_TOKEN and len(HF_TOKEN) > 10),
439
+ 'clients_initialized': len(http_clients)
440
  })
441
 
442
  except Exception as e:
 
505
  <strong>Model:</strong> {MODEL_NAME}
506
  </div>
507
 
508
+ <div class="status {'good' if http_clients else 'bad'}">
509
+ <strong>HTTP Clients:</strong> {len(http_clients)} initialized
510
  </div>
511
 
512
  <div class="status good">
513
+ <strong>Current Client:</strong> {http_clients[current_provider_index]["name"] if http_clients else "none"}
514
  </div>
515
 
516
  <h2>πŸ”§ Configuration Instructions</h2>
517
+ <p>Using HuggingFace Router with HTTP requests (only HF_TOKEN required):</p>
518
  <ol>
519
  <li>Go to your space settings</li>
520
  <li>Click "Variables and secrets"</li>
 
537
  """Get status of all InferenceClient providers"""
538
  provider_statuses = []
539
 
540
+ for i, client_info in enumerate(http_clients):
541
  status_info = {
542
  "name": client_info["name"],
543
+ "provider_type": "hf_router_http",
544
  "active": i == current_provider_index,
545
  "model": client_info.get("model", MODEL_NAME),
546
  "has_api_key": bool(HF_TOKEN and len(HF_TOKEN) > 10),
 
549
  provider_statuses.append(status_info)
550
 
551
  # Count available providers
552
+ available_providers = len(http_clients) if HF_TOKEN and len(HF_TOKEN) > 10 else 0
553
 
554
  return jsonify({
555
  "providers": provider_statuses,
556
+ "current_provider": http_clients[current_provider_index]["name"] if http_clients else "none",
557
+ "current_provider_type": "hf_router_http",
558
+ "total_providers": len(http_clients),
559
  "available_providers": available_providers,
560
  "model": MODEL_NAME,
561
  "api_keys_status": {
562
  "hf_token": bool(HF_TOKEN and len(HF_TOKEN) > 10),
563
+ "note": "Using HuggingFace Router with HTTP requests - only HF_TOKEN required"
564
  }
565
  })
566
 
567
  @app.route('/switch_provider', methods=['POST'])
568
  def switch_provider():
569
+ """Manually switch to next HTTP client provider"""
570
  global current_provider_index
571
 
572
+ if not http_clients:
573
  return jsonify({
574
+ "error": "No HTTP clients available",
575
  "message": "Please check HF_TOKEN configuration"
576
  }), 500
577
 
578
+ old_client = http_clients[current_provider_index]["name"]
579
+ current_provider_index = (current_provider_index + 1) % len(http_clients)
580
+ new_client = http_clients[current_provider_index]["name"]
581
 
582
  return jsonify({
583
+ "switched_from": f"{old_client} (HTTP)",
584
+ "switched_to": f"{new_client} (HTTP)",
585
+ "message": f"Switched from {old_client} to {new_client} HTTP client",
586
  "model": MODEL_NAME
587
  })
588
 
 
593
 
594
  print("πŸš€ AEGIS BIO LAB 10 CONDUCTOR initializing with DeepSeek-V3.2-Exp via HuggingFace Router...")
595
  print(f"πŸ€— Model: {MODEL_NAME}")
596
+ print(f"πŸ”— Endpoint: https://router.huggingface.co/v1/chat/completions")
597
 
598
+ if http_clients:
599
+ client_list = ', '.join([f"{c['name']} ({c['model']})" for c in http_clients])
600
+ print(f"πŸ“‘ Available HTTP clients: {client_list}")
601
+ print(f"πŸ”„ Automatic failover enabled across {len(http_clients)} HTTP clients")
602
  else:
603
+ print("❌ No HTTP clients initialized - check HF_TOKEN")
604
 
605
  print(f"🌍 Global analysis across {len(GLOBAL_REGIONS)} regions")
606
  print(f"πŸ”‘ Using HuggingFace Token: {'βœ… Valid' if HF_TOKEN and len(HF_TOKEN) > 10 else '❌ Missing'}")
607
 
608
+ loading_status = f"AEGIS BIO LAB {AEGIS_VERSION} CONDUCTOR ready with DeepSeek-V3.2-Exp via HuggingFace Router HTTP"
609
  print("βœ… AEGIS BIO LAB 10 CONDUCTOR ready!")
610
 
611
  if __name__ == '__main__':