GitHub Copilot committed on
Commit
43840dc
·
1 Parent(s): 7fe60a7

Fix: Replace NeMo/OpenAI with Dolphin Agent (HF Inference)

Browse files
Files changed (2) hide show
  1. app.py +10 -9
  2. logos/connectors.py +51 -65
app.py CHANGED
@@ -391,25 +391,26 @@ with gr.Blocks(theme=gr.themes.Monochrome(), title="LOGOS SPCW Protocol") as dem
391
  history = history or []
392
  history.append((message, None))
393
 
394
- # Try NeMo connector first, fallback to simple response
395
  try:
396
  from logos.connectors import get_connector
397
- nemo = get_connector('nemo')
398
 
399
- # Add LOGOS context to prompt
400
- logos_context = f"""You are LOGOS, an AI assistant specialized in:
401
  - Prime Network Architecture (integer topology, GCD routing)
402
  - SPCW (Structured Prime Composite Waveform) protocol
403
  - Hex/Binary Dissolution and enterprise routing
404
  - Fractal addressing and heat code encoding
405
-
406
- User query: {message}"""
407
 
408
- response = nemo.chat(logos_context)
 
 
409
  history[-1] = (message, response)
410
  except Exception as e:
411
  # Fallback response
412
- history[-1] = (message, f"*[AI unavailable: {str(e)}]*\n\nTo enable AI:\n1. Get API key from [build.nvidia.com](https://build.nvidia.com)\n2. Set `NVIDIA_API_KEY` environment variable\n3. Install: `pip install nvidia-nat openai`")
413
 
414
  return history, ""
415
 
@@ -426,7 +427,7 @@ User query: {message}"""
426
  ('hf', 'Hugging Face'),
427
  ('ocr', 'EasyOCR'),
428
  ('vision', 'Vision (CV)'),
429
- ('nemo', 'NeMo Agent'),
430
  ('browser', 'Browser Automation')
431
  ]
432
 
 
391
  history = history or []
392
  history.append((message, None))
393
 
394
+ # Try Dolphin connector first, fallback to simple response
395
  try:
396
  from logos.connectors import get_connector
397
+ dolphin = get_connector('dolphin')
398
 
399
+ # LOGOS System Context
400
+ logos_context = """You are LOGOS, an AI assistant specialized in:
401
  - Prime Network Architecture (integer topology, GCD routing)
402
  - SPCW (Structured Prime Composite Waveform) protocol
403
  - Hex/Binary Dissolution and enterprise routing
404
  - Fractal addressing and heat code encoding
405
+ """
 
406
 
407
+ # Use Dolphin Chat
408
+ # DolphinAgentConnector.chat takes (message, system_prompt)
409
+ response = dolphin.chat(message, system_prompt=logos_context)
410
  history[-1] = (message, response)
411
  except Exception as e:
412
  # Fallback response
413
+ history[-1] = (message, f"*[AI unavailable: {str(e)}]*\n\nTo enable AI:\n1. Ensure `HF_TOKEN` is set in Space secrets.\n2. Ensure `huggingface_hub` is installed.")
414
 
415
  return history, ""
416
 
 
427
  ('hf', 'Hugging Face'),
428
  ('ocr', 'EasyOCR'),
429
  ('vision', 'Vision (CV)'),
430
+ ('dolphin', 'Dolphin AI'),
431
  ('browser', 'Browser Automation')
432
  ]
433
 
logos/connectors.py CHANGED
@@ -219,82 +219,68 @@ class VisionConnector:
219
  raise ImportError(f"Required library not installed: {e}")
220
 
221
  # ==========================================
222
- # NEMO AGENT CONNECTOR (NVIDIA)
223
  # ==========================================
224
 
225
- class NeMoAgentConnector:
226
  """
227
- Adapter for NVIDIA NeMo Agent Toolkit.
228
- Provides ReAct agents, tool calling, and vision-language models.
229
  """
230
 
231
- def __init__(self, api_key: Optional[str] = None, base_url: str = "https://integrate.api.nvidia.com/v1"):
232
- self.api_key = api_key or os.environ.get('NVIDIA_API_KEY')
233
- self.base_url = base_url
234
- self._agent = None
235
 
236
- def _ensure_agent(self):
237
- """Lazy initialization of NeMo Agent."""
238
- if self._agent is None:
239
  try:
240
- from nat import Agent
241
- self._agent = Agent(api_key=self.api_key, base_url=self.base_url)
242
  except ImportError:
243
- raise ImportError("nvidia-nat not installed. Run: pip install nvidia-nat")
244
- return self._agent
245
 
246
- def chat(self, message: str, model: str = "nvidia/nemotron-3-nano-30b-a3b") -> str:
247
  """
248
- Chat with NeMo agent.
249
-
250
- Args:
251
- message: User message
252
- model: NVIDIA model ID
253
-
254
- Returns:
255
- Agent response
256
  """
257
  try:
258
- from openai import OpenAI
259
- client = OpenAI(api_key=self.api_key, base_url=self.base_url)
260
- response = client.chat.completions.create(
261
- model=model,
262
- messages=[{"role": "user", "content": message}]
263
- )
264
- return response.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
  except Exception as e:
266
- return f"[NeMo Error] {e}"
267
 
268
- def analyze_diagram(self, image_path: str, prompt: str = "Describe this architectural diagram in detail.") -> str:
269
  """
270
- Analyze diagram/image using vision-language model.
271
-
272
- Args:
273
- image_path: Path to image
274
- prompt: Analysis prompt
275
-
276
- Returns:
277
- Analysis text
278
  """
279
  try:
280
- import base64
281
- from openai import OpenAI
282
-
283
- with open(image_path, 'rb') as f:
284
- image_data = base64.b64encode(f.read()).decode('utf-8')
285
-
286
- client = OpenAI(api_key=self.api_key, base_url=self.base_url)
287
- response = client.chat.completions.create(
288
- model="nvidia/nemotron-nano-12b-v2-vl",
289
- messages=[{
290
- "role": "user",
291
- "content": [
292
- {"type": "text", "text": prompt},
293
- {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_data}"}}
294
- ]
295
- }]
296
- )
297
- return response.choices[0].message.content
298
  except Exception as e:
299
  return f"[Vision Error] {e}"
300
 
@@ -442,7 +428,7 @@ def get_connector(connector_type: str, **kwargs) -> Any:
442
  'hf': HuggingFaceConnector,
443
  'ocr': OCRConnector,
444
  'vision': VisionConnector,
445
- 'nemo': NeMoAgentConnector,
446
  'browser': BrowserAutomationConnector
447
  }
448
 
@@ -475,11 +461,11 @@ AVAILABLE_CONNECTORS = {
475
  'requires': ['opencv-python-headless', 'scikit-image'],
476
  'env_vars': []
477
  },
478
- 'nemo': {
479
- 'name': 'NVIDIA NeMo Agent Toolkit',
480
- 'capabilities': ['chat', 'analyze_diagram', 'react_agent', 'tool_calling'],
481
- 'requires': ['nvidia-nat', 'openai'],
482
- 'env_vars': ['NVIDIA_API_KEY']
483
  },
484
  'browser': {
485
  'name': 'Browser Automation (smolagents)',
 
219
  raise ImportError(f"Required library not installed: {e}")
220
 
221
  # ==========================================
222
+ # DOLPHIN AGENT CONNECTOR (HF Inference)
223
  # ==========================================
224
 
225
class DolphinAgentConnector:
    """
    Adapter for Dolphin AI served via the Hugging Face Inference API.

    Replaces the previous NeMo/OpenAI dependency with open weights.
    Requires the ``huggingface_hub`` package; the API token is read via
    ``ConnectorConfig.from_env()`` (``HF_TOKEN``).
    """

    def __init__(self, model: str = "cognitivecomputations/dolphin-2.9-llama3-8b"):
        # Hub model id; overridable per instance.
        self.model = model
        self.config = ConnectorConfig.from_env()
        # Lazily-created InferenceClient (see _ensure_client).
        self._client = None

    def _ensure_client(self):
        """Lazily create and cache the HF InferenceClient.

        Raises:
            ImportError: If ``huggingface_hub`` is not installed.
        """
        if self._client is None:
            try:
                from huggingface_hub import InferenceClient
                self._client = InferenceClient(token=self.config.hf_token)
            except ImportError:
                raise ImportError("huggingface_hub not installed.")
        return self._client

    def chat(self, message: str, system_prompt: Optional[str] = None) -> str:
        """
        Chat with the Dolphin model.

        Args:
            message: User message.
            system_prompt: Optional system/context prompt prepended to the
                conversation.

        Returns:
            Model response text, or an ``[Dolphin Error] ...`` string on
            failure (this method never raises).
        """
        try:
            client = self._ensure_client()

            messages = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": message})

            # Prefer the chat-completion API; most HF serverless endpoints
            # with a chat template support it via InferenceClient.
            try:
                response = client.chat_completion(
                    messages=messages,
                    model=self.model,
                    max_tokens=500
                )
                return response.choices[0].message.content
            except Exception:
                # Fallback: raw text generation with a ChatML-style prompt
                # (the template Dolphin models are trained on).
                # Fix: the previous fallback dropped system_prompt, so the
                # two paths saw different context; include it here too.
                parts = []
                if system_prompt:
                    parts.append(f"<|im_start|>system\n{system_prompt}<|im_end|>\n")
                parts.append(f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n")
                return client.text_generation(
                    "".join(parts), model=self.model, max_new_tokens=500
                )

        except Exception as e:
            return f"[Dolphin Error] {e}"

    def analyze_diagram(self, image_path: str, prompt: str = "Describe this architectural diagram.") -> str:
        """
        Analyze a diagram image.

        Dolphin is a text-only model, so this routes to the Hugging Face
        connector's image captioning instead.

        Args:
            image_path: Path to the image file.
            prompt: Unused by the captioning fallback; kept for interface
                compatibility with the previous connector.

        Returns:
            Caption text, or a ``[Vision Error] ...`` string on failure
            (this method never raises).
        """
        try:
            # get_connector is defined later in this same module and is
            # resolved at call time, so no self-import is needed.
            hf = get_connector('hf')
            return hf.image_to_text(image_path)
        except Exception as e:
            return f"[Vision Error] {e}"
286
 
 
428
  'hf': HuggingFaceConnector,
429
  'ocr': OCRConnector,
430
  'vision': VisionConnector,
431
+ 'dolphin': DolphinAgentConnector,
432
  'browser': BrowserAutomationConnector
433
  }
434
 
 
461
  'requires': ['opencv-python-headless', 'scikit-image'],
462
  'env_vars': []
463
  },
464
+ 'dolphin': {
465
+ 'name': 'Dolphin AI (HF Inference)',
466
+ 'capabilities': ['chat', 'analyze_diagram'],
467
+ 'requires': ['huggingface_hub'],
468
+ 'env_vars': ['HF_TOKEN']
469
  },
470
  'browser': {
471
  'name': 'Browser Automation (smolagents)',