|
|
|
|
|
""" |
|
|
LiMp User Interface |
|
|
================== |
|
|
Elegant command-line interface for the LiMp Pipeline Integration System |
|
|
with conversational prompts and comprehensive function access. |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import json |
|
|
import asyncio |
|
|
import logging |
|
|
from pathlib import Path |
|
|
from typing import Dict, List, Any, Optional, Callable |
|
|
from datetime import datetime |
|
|
import argparse |
|
|
|
|
|
|
|
|
# Optional dependency: "rich" powers the styled terminal UI (panels,
# tables, spinners, prompts). When it is missing, every method falls back
# to plain print()-based output, keyed off the RICH_AVAILABLE flag.
try:
    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table
    from rich.progress import Progress, SpinnerColumn, TextColumn
    from rich.prompt import Prompt, Confirm
    from rich.text import Text
    from rich.layout import Layout
    from rich.live import Live
    from rich import box
    RICH_AVAILABLE = True
except ImportError:
    RICH_AVAILABLE = False
    print("⚠️ Rich not available. Install with: pip install rich")
|
|
|
|
|
|
|
|
# Optional dependency: colorama enables ANSI color codes on Windows
# consoles. Best-effort: absence is recorded but not announced.
try:
    from colorama import init, Fore, Back, Style
    init(autoreset=True)  # reset color state after every print
    COLORAMA_AVAILABLE = True
except ImportError:
    COLORAMA_AVAILABLE = False

# Module-level logger (left unconfigured; inherits root handlers).
logger = logging.getLogger(__name__)
|
|
|
|
|
class LiMpInterface: |
|
|
"""Main LiMp user interface class.""" |
|
|
|
|
|
def __init__(self):
    """Initialize console, session state, and command registry.

    Side effects: probes installed dependencies / component files via
    _check_system_status() and prints the welcome banner immediately.
    """
    # Rich console only exists when the optional dependency imported.
    self.console = Console() if RICH_AVAILABLE else None
    # Main-loop flag; presumably _cmd_exit flips this to False — the
    # method is defined outside this view, TODO confirm.
    self.running = True
    # Per-session bookkeeping surfaced by various commands.
    self.session_data = {
        "start_time": datetime.now().isoformat(),
        "commands_run": 0,
        "models_loaded": [],
        "current_mode": "interactive"
    }

    # Command name -> {description, usage, category, function} registry.
    self.commands = self._initialize_commands()

    # One-time environment probe (dependencies, files, hardware).
    self.system_status = self._check_system_status()

    self._display_welcome()
|
|
|
|
|
def _initialize_commands(self) -> Dict[str, Dict[str, Any]]:
    """Build the command registry: name -> metadata record + handler.

    Each record carries "description", "usage", "category", and
    "function" keys, consumed by _process_command and _cmd_help.
    """

    def entry(description: str, usage: str, category: str,
              function: Callable) -> Dict[str, Any]:
        # Uniform record shape for every registered command.
        return {
            "description": description,
            "usage": usage,
            "category": category,
            "function": function,
        }

    return {
        "help": entry("Show help information and available commands",
                      "help [command]", "system", self._cmd_help),
        "status": entry("Show system status and component availability",
                        "status", "system", self._cmd_status),
        "hardware": entry("Analyze hardware specifications and compatibility",
                          "hardware [--save-report]", "system", self._cmd_hardware),
        "chat": entry("Start conversational mode with LiMp pipeline",
                      "chat [--model MODEL_NAME]", "interaction", self._cmd_chat),
        "process_pdf": entry("Process PDF documents for training data",
                             "process_pdf <file_path> [--output-dir DIR]",
                             "data_processing", self._cmd_process_pdf),
        "train": entry("Train models with advanced training system",
                       "train --config CONFIG_FILE [--data DATA_PATH]",
                       "training", self._cmd_train),
        "benchmark": entry("Run benchmark comparisons",
                           "benchmark [--models MODEL1,MODEL2] [--quick]",
                           "evaluation", self._cmd_benchmark),
        "demo": entry("Run demonstration of LiMp capabilities",
                      "demo [--type TYPE]", "demo", self._cmd_demo),
        "load_model": entry("Load HuggingFace models for inference",
                            "load_model <model_name> [--device DEVICE]",
                            "models", self._cmd_load_model),
        "generate": entry("Generate text using loaded models",
                          "generate <prompt> [--model MODEL] [--max-length LENGTH]",
                          "generation", self._cmd_generate),
        "analyze": entry("Analyze text with dimensional features",
                         "analyze <text> [--features FEATURE1,FEATURE2]",
                         "analysis", self._cmd_analyze),
        "visualize": entry("Create visualizations of results",
                           "visualize [--type TYPE] [--input FILE]",
                           "visualization", self._cmd_visualize),
        "export": entry("Export results and model cards",
                        "export [--format FORMAT] [--output DIR]",
                        "export", self._cmd_export),
        "clear": entry("Clear screen and reset interface",
                       "clear", "system", self._cmd_clear),
        "exit": entry("Exit the LiMp interface",
                      "exit", "system", self._cmd_exit),
    }
|
|
|
|
|
def _check_system_status(self) -> Dict[str, Any]: |
|
|
"""Check system status and component availability.""" |
|
|
|
|
|
status = { |
|
|
"timestamp": datetime.now().isoformat(), |
|
|
"components": {}, |
|
|
"dependencies": {}, |
|
|
"hardware": {}, |
|
|
"models": {} |
|
|
} |
|
|
|
|
|
|
|
|
dependencies = { |
|
|
"torch": self._check_import("torch"), |
|
|
"transformers": self._check_import("transformers"), |
|
|
"numpy": self._check_import("numpy"), |
|
|
"sklearn": self._check_import("sklearn"), |
|
|
"rich": self._check_import("rich"), |
|
|
"colorama": self._check_import("colorama"), |
|
|
"nltk": self._check_import("nltk"), |
|
|
"spacy": self._check_import("spacy"), |
|
|
"PyPDF2": self._check_import("PyPDF2"), |
|
|
"pdfplumber": self._check_import("pdfplumber"), |
|
|
"PyMuPDF": self._check_import("fitz") |
|
|
} |
|
|
|
|
|
status["dependencies"] = dependencies |
|
|
|
|
|
|
|
|
components = { |
|
|
"hf_model_orchestrator": Path("hf_model_orchestrator.py").exists(), |
|
|
"enhanced_dual_llm_orchestrator": Path("enhanced_dual_llm_orchestrator.py").exists(), |
|
|
"group_b_integration_system": Path("group_b_integration_system.py").exists(), |
|
|
"group_c_integration_system": Path("group_c_integration_system.py").exists(), |
|
|
"integrated_pipeline_system": Path("integrated_pipeline_system.py").exists(), |
|
|
"enhanced_tokenizer_integration": Path("enhanced_tokenizer_integration.py").exists(), |
|
|
"pdf_processing_system": Path("pdf_processing_system.py").exists(), |
|
|
"advanced_training_system": Path("advanced_training_system.py").exists(), |
|
|
"hardware_specifications": Path("hardware_specifications.py").exists() |
|
|
} |
|
|
|
|
|
status["components"] = components |
|
|
|
|
|
|
|
|
try: |
|
|
import psutil |
|
|
memory = psutil.virtual_memory() |
|
|
status["hardware"] = { |
|
|
"cpu_cores": psutil.cpu_count(), |
|
|
"total_ram_gb": memory.total / (1024**3), |
|
|
"available_ram_gb": memory.available / (1024**3), |
|
|
"gpu_available": self._check_import("torch") and torch.cuda.is_available() |
|
|
} |
|
|
except: |
|
|
status["hardware"] = {"error": "Unable to detect hardware"} |
|
|
|
|
|
return status |
|
|
|
|
|
def _check_import(self, module_name: str) -> bool: |
|
|
"""Check if a module can be imported.""" |
|
|
try: |
|
|
__import__(module_name) |
|
|
return True |
|
|
except ImportError: |
|
|
return False |
|
|
|
|
|
def _display_welcome(self):
    """Display welcome message and system information.

    Rich path wraps the ASCII banner in a Panel; plain path prints a
    short three-line greeting. Ends by showing a quick status table.
    """
    if RICH_AVAILABLE:
        # Banner literal is preserved verbatim (box-drawing characters).
        welcome_text = """
╔══════════════════════════════════════════════════════════════════════════════╗
║ 🌟 LiMp Pipeline Interface 🌟 ║
║ ║
║ Welcome to the LiMp (Linguistic Matrix Processing) Pipeline Integration ║
║ System - Your gateway to advanced AI with dimensional entanglement, ║
║ quantum enhancement, and emergent cognitive capabilities! ║
║ ║
║ 🚀 Features: ║
║ • Dual LLM Orchestration (LFM2-8B + FemTO-R1C) ║
║ • Group B Integration (Holographic + Dimensional + Matrix) ║
║ • Group C Integration (TA-ULS + Neuro-Symbolic + Signal Processing) ║
║ • Enhanced Advanced Tokenizer ║
║ • PDF Processing & Advanced Training ║
║ • Comprehensive Benchmarking ║
║ ║
║ 💡 Type 'help' for available commands or 'chat' to start conversing! ║
╚══════════════════════════════════════════════════════════════════════════════╝
"""
        self.console.print(Panel(welcome_text, title="🌟 LiMp Interface", border_style="blue"))
    else:
        print("🌟 LiMp Pipeline Interface 🌟")
        print("Welcome to the LiMp Pipeline Integration System!")
        print("Type 'help' for available commands or 'chat' to start conversing!")

    # Brief dependency summary follows the banner in both paths.
    self._show_quick_status()
|
|
|
|
|
def _show_quick_status(self):
    """Print availability of the core dependencies after the banner.

    The key-component list and status strings are computed once and
    shared by both render paths (the original duplicated them).
    """
    key_components = ["torch", "transformers", "numpy", "rich"]
    rows = [
        (component,
         "✅ Available" if self.system_status["dependencies"].get(component, False) else "❌ Missing")
        for component in key_components
    ]

    if RICH_AVAILABLE:
        table = Table(title="System Status", box=box.ROUNDED)
        table.add_column("Component", style="cyan")
        table.add_column("Status", style="green")
        for component, status in rows:
            table.add_row(component, status)
        self.console.print(table)
    else:
        print("\nSystem Status:")
        for component, status in rows:
            print(f" {component}: {status}")
|
|
|
|
|
def run(self):
    """Main REPL: read a command line, dispatch it, repeat until exit.

    Ctrl-C exits with a goodbye message; other exceptions are reported
    and the loop continues.
    """
    while self.running:
        try:
            if RICH_AVAILABLE:
                user_input = Prompt.ask("\n[bold blue]LiMp[/bold blue]", default="help")
            else:
                user_input = input("\nLiMp> ")

            # Consistency fix: the original stripped only the plain-input
            # branch; normalize whitespace for both paths.
            user_input = user_input.strip()
            if not user_input:
                continue

            self.session_data["commands_run"] += 1
            self._process_command(user_input)

        except KeyboardInterrupt:
            print("\n\n👋 Goodbye! Thanks for using LiMp!")
            break
        except Exception as e:
            if RICH_AVAILABLE:
                self.console.print(f"[red]Error: {e}[/red]")
            else:
                print(f"Error: {e}")
|
|
|
|
|
def _process_command(self, user_input: str): |
|
|
"""Process user command.""" |
|
|
|
|
|
parts = user_input.split() |
|
|
command = parts[0].lower() |
|
|
args = parts[1:] if len(parts) > 1 else [] |
|
|
|
|
|
if command in self.commands: |
|
|
try: |
|
|
self.commands[command]["function"](args) |
|
|
except Exception as e: |
|
|
if RICH_AVAILABLE: |
|
|
self.console.print(f"[red]Command error: {e}[/red]") |
|
|
else: |
|
|
print(f"Command error: {e}") |
|
|
else: |
|
|
|
|
|
if command not in ["help", "status", "exit", "clear"]: |
|
|
self._handle_conversational_input(user_input) |
|
|
else: |
|
|
if RICH_AVAILABLE: |
|
|
self.console.print(f"[yellow]Unknown command: {command}[/yellow]") |
|
|
self.console.print("Type 'help' for available commands.") |
|
|
else: |
|
|
print(f"Unknown command: {command}") |
|
|
print("Type 'help' for available commands.") |
|
|
|
|
|
def _handle_conversational_input(self, user_input: str):
    """Nudge the user toward 'chat' when input matches no command."""
    if RICH_AVAILABLE:
        emit = self.console.print
        emit("[yellow]💭 Did you mean to start a conversation?[/yellow]")
        emit("Try: [bold]chat[/bold] to start conversational mode")
        emit("Or: [bold]help[/bold] to see available commands")
    else:
        for line in ("💭 Did you mean to start a conversation?",
                     "Try: 'chat' to start conversational mode",
                     "Or: 'help' to see available commands"):
            print(line)
|
|
|
|
|
def _cmd_help(self, args: List[str]):
    """Show detailed help for one command, or the grouped command list.

    With a known command name in *args*, prints that command's record;
    otherwise lists all commands grouped by category.
    """
    if args and args[0] in self.commands:
        cmd = self.commands[args[0]]
        if RICH_AVAILABLE:
            self.console.print(f"\n[bold blue]Command: {args[0]}[/bold blue]")
            self.console.print(f"Description: {cmd['description']}")
            self.console.print(f"Usage: {cmd['usage']}")
            self.console.print(f"Category: {cmd['category']}")
        else:
            print(f"\nCommand: {args[0]}")
            print(f"Description: {cmd['description']}")
            print(f"Usage: {cmd['usage']}")
            print(f"Category: {cmd['category']}")
        return

    # Group commands by category once; both render paths share the result
    # (the original duplicated this grouping loop in each branch).
    categories: Dict[str, List[Any]] = {}
    for cmd_name, cmd_info in self.commands.items():
        categories.setdefault(cmd_info["category"], []).append((cmd_name, cmd_info))

    if RICH_AVAILABLE:
        for category, commands in categories.items():
            table = Table(title=f"{category.title()} Commands", box=box.ROUNDED)
            table.add_column("Command", style="cyan")
            table.add_column("Description", style="white")
            table.add_column("Usage", style="dim")
            for cmd_name, cmd_info in commands:
                table.add_row(cmd_name, cmd_info["description"], cmd_info["usage"])
            self.console.print(table)
    else:
        print("\nAvailable Commands:")
        for category, commands in categories.items():
            print(f"\n{category.upper()}:")
            for cmd_name, cmd_info in commands:
                print(f" {cmd_name:<15} - {cmd_info['description']}")
                print(f" Usage: {cmd_info['usage']}")
|
|
|
|
|
def _cmd_status(self, args: List[str]):
    """Show system status.

    Renders the snapshot captured by _check_system_status() at startup;
    *args* is accepted for dispatcher uniformity but unused.
    """
    if RICH_AVAILABLE:
        # Third-party package availability.
        deps_table = Table(title="Dependencies", box=box.ROUNDED)
        deps_table.add_column("Package", style="cyan")
        deps_table.add_column("Status", style="green")

        for dep, available in self.system_status["dependencies"].items():
            status = "✅ Available" if available else "❌ Missing"
            deps_table.add_row(dep, status)

        self.console.print(deps_table)

        # Presence of LiMp component source files.
        comp_table = Table(title="Components", box=box.ROUNDED)
        comp_table.add_column("Component", style="cyan")
        comp_table.add_column("Status", style="green")

        for comp, exists in self.system_status["components"].items():
            status = "✅ Available" if exists else "❌ Missing"
            comp_table.add_row(comp, status)

        self.console.print(comp_table)

        # Hardware details, only when the startup probe succeeded.
        if "error" not in self.system_status["hardware"]:
            hw_table = Table(title="Hardware", box=box.ROUNDED)
            hw_table.add_column("Specification", style="cyan")
            hw_table.add_column("Value", style="green")

            for spec, value in self.system_status["hardware"].items():
                hw_table.add_row(spec.replace("_", " ").title(), str(value))

            self.console.print(hw_table)
    else:
        # NOTE(review): the plain path omits the hardware section shown
        # in the rich path — confirm whether that is intentional.
        print("\nSystem Status:")
        print("\nDependencies:")
        for dep, available in self.system_status["dependencies"].items():
            status = "✅ Available" if available else "❌ Missing"
            print(f" {dep}: {status}")

        print("\nComponents:")
        for comp, exists in self.system_status["components"].items():
            status = "✅ Available" if exists else "❌ Missing"
            print(f" {comp}: {status}")
|
|
|
|
|
def _cmd_hardware(self, args: List[str]):
    """Run the hardware analyzer and show per-model compatibility.

    Supports "--save-report" to persist the analyzer's report (now
    honored in both render paths; the original only supported it under
    rich). Requires the project-local hardware_specifications module.
    """

    def compat_rows(report):
        # Normalize report entries into (model, compatibility, perf) rows
        # shared by both render paths.
        rows = []
        for model_name, compatibility in report["model_compatibility"].items():
            compat = "✅ Compatible" if compatibility["compatible"] else "❌ Incompatible"
            perf = compatibility["performance_estimate"].title()
            rows.append((model_name, compat, perf))
        return rows

    if RICH_AVAILABLE:
        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Analyzing hardware...", total=None)

            try:
                from hardware_specifications import HardwareAnalyzer
                analyzer = HardwareAnalyzer()
                report = analyzer.generate_hardware_report()
                # Fix: stop the spinner before drawing output (the original
                # printed the table while the live spinner was still active).
                progress.stop()

                hw_table = Table(title="Hardware Analysis", box=box.ROUNDED)
                hw_table.add_column("Model", style="cyan")
                hw_table.add_column("Compatibility", style="green")
                hw_table.add_column("Performance", style="yellow")
                for model_name, compat, perf in compat_rows(report):
                    hw_table.add_row(model_name, compat, perf)
                self.console.print(hw_table)

                if "--save-report" in args:
                    analyzer.save_report()
                    self.console.print("[green]Hardware report saved![/green]")

            except Exception as e:
                progress.stop()
                self.console.print(f"[red]Hardware analysis failed: {e}[/red]")
    else:
        print("Analyzing hardware...")
        try:
            from hardware_specifications import HardwareAnalyzer
            analyzer = HardwareAnalyzer()
            report = analyzer.generate_hardware_report()

            print("\nHardware Analysis:")
            for model_name, compat, perf in compat_rows(report):
                print(f" {model_name}: {compat} ({perf})")

            # Consistency fix: honor --save-report in the plain path too.
            if "--save-report" in args:
                analyzer.save_report()
                print("Hardware report saved!")

        except Exception as e:
            print(f"Hardware analysis failed: {e}")
|
|
|
|
|
def _cmd_chat(self, args: List[str]):
    """Start conversational mode.

    Runs an inner REPL until the user types exit/quit/back or hits
    Ctrl-C. NOTE(review): usage advertises a "--model" option that is
    not parsed here — confirm intent.
    """
    if RICH_AVAILABLE:
        self.console.print("[bold green]💬 Starting conversational mode...[/bold green]")
        self.console.print("Type your messages and I'll respond using the LiMp pipeline!")
        self.console.print("Type 'exit' to return to command mode.\n")
    else:
        print("💬 Starting conversational mode...")
        print("Type your messages and I'll respond using the LiMp pipeline!")
        print("Type 'exit' to return to command mode.\n")

    chat_mode = True
    while chat_mode:
        try:
            if RICH_AVAILABLE:
                user_input = Prompt.ask("[bold blue]You[/bold blue]")
            else:
                user_input = input("You> ").strip()

            # Any of these words ends the chat session.
            if user_input.lower() in ['exit', 'quit', 'back']:
                chat_mode = False
                if RICH_AVAILABLE:
                    self.console.print("[green]Returning to command mode...[/green]")
                else:
                    print("Returning to command mode...")
                break

            if not user_input:
                continue

            # Route the message through the (mock) pipeline responder.
            self._process_conversational_input(user_input)

        except KeyboardInterrupt:
            # Ctrl-C leaves chat mode but not the whole interface.
            chat_mode = False
            break
|
|
|
|
|
def _process_conversational_input(self, user_input: str):
    """Process conversational input through LiMp pipeline.

    NOTE(review): responses are canned (see _generate_mock_response);
    the sleep only simulates pipeline latency.
    """
    if RICH_AVAILABLE:
        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Processing through LiMp pipeline...", total=None)

            import time
            time.sleep(1)  # simulated processing delay

            response = self._generate_mock_response(user_input)

            progress.stop()

            self.console.print(f"[bold green]LiMp[/bold green]: {response}")
    else:
        print("Processing through LiMp pipeline...")
        import time
        time.sleep(1)  # simulated processing delay

        response = self._generate_mock_response(user_input)
        print(f"LiMp: {response}")
|
|
|
|
|
def _generate_mock_response(self, user_input: str) -> str: |
|
|
"""Generate mock response for conversational mode.""" |
|
|
|
|
|
|
|
|
user_lower = user_input.lower() |
|
|
|
|
|
if any(word in user_lower for word in ['hello', 'hi', 'hey']): |
|
|
return "Hello! I'm LiMp, your advanced AI assistant with dimensional entanglement capabilities. How can I help you today?" |
|
|
|
|
|
elif any(word in user_lower for word in ['dimensional', 'entanglement', 'quantum']): |
|
|
return "Dimensional entanglement in AI systems involves complex multi-dimensional state spaces where neural representations can exist in superposition states, enabling emergent cognitive patterns that transcend traditional linear processing paradigms." |
|
|
|
|
|
elif any(word in user_lower for word in ['holographic', 'memory']): |
|
|
return "Holographic memory systems use content-addressable associative storage with Fourier transforms to enable distributed information retrieval and pattern recognition across multiple dimensions." |
|
|
|
|
|
elif any(word in user_lower for word in ['ta-uls', 'neural', 'architecture']): |
|
|
return "TA-ULS (Two-level Trans-Algorithmic Universal Learning System) is a neural architecture with Kinetic Force Principle layers, two-level control, entropy regulation, and enhanced transformer blocks for advanced learning." |
|
|
|
|
|
elif any(word in user_lower for word in ['emergent', 'emergence', 'consciousness']): |
|
|
return "Emergence in AI systems refers to the appearance of novel properties and behaviors that arise from the interaction of simpler components, often leading to unexpected capabilities and insights." |
|
|
|
|
|
elif any(word in user_lower for word in ['help', 'what', 'how']): |
|
|
return "I can help you with dimensional analysis, quantum enhancement, holographic processing, neuro-symbolic reasoning, and much more! Try asking about specific concepts or use the 'help' command to see all available functions." |
|
|
|
|
|
else: |
|
|
return f"Thank you for your input: '{user_input}'. I'm processing this through our dimensional entanglement framework and neuro-symbolic reasoning systems. The LiMp pipeline is analyzing the semantic, mathematical, and fractal dimensions of your message to provide comprehensive insights." |
|
|
|
|
|
def _cmd_process_pdf(self, args: List[str]):
    """Process PDF documents.

    Pipeline: extract -> chunk -> training entries -> save, via the
    project-local pdf_processing_system module. First positional arg is
    the PDF path; optional "--output-dir DIR" overrides the default.
    """
    if not args:
        # Usage error: a source file path is required.
        if RICH_AVAILABLE:
            self.console.print("[red]Please provide a PDF file path[/red]")
            self.console.print("Usage: process_pdf <file_path> [--output-dir DIR]")
        else:
            print("Please provide a PDF file path")
            print("Usage: process_pdf <file_path> [--output-dir DIR]")
        return

    file_path = args[0]
    output_dir = "processed_pdfs"

    if "--output-dir" in args:
        idx = args.index("--output-dir")
        if idx + 1 < len(args):
            output_dir = args[idx + 1]

    if RICH_AVAILABLE:
        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Processing PDF document...", total=None)

            try:
                from pdf_processing_system import PDFProcessor
                processor = PDFProcessor(output_dir)

                pdf_doc = processor.process_pdf_file(file_path)
                chunks = processor.chunk_document(pdf_doc)
                training_entries = processor.create_training_entries(chunks)
                saved_files = processor.save_processed_data()  # return value unused

                progress.stop()

                # Summarize the run in a table.
                results_table = Table(title="PDF Processing Results", box=box.ROUNDED)
                results_table.add_column("Metric", style="cyan")
                results_table.add_column("Value", style="green")

                results_table.add_row("Document", pdf_doc.filename)
                results_table.add_row("Pages", str(pdf_doc.page_count))
                results_table.add_row("Characters", str(len(pdf_doc.text_content)))
                results_table.add_row("Chunks Created", str(len(chunks)))
                results_table.add_row("Training Entries", str(len(training_entries)))

                self.console.print(results_table)

                self.console.print(f"[green]Processing complete! Files saved to: {output_dir}[/green]")

            except Exception as e:
                progress.stop()
                self.console.print(f"[red]PDF processing failed: {e}[/red]")
    else:
        print("Processing PDF document...")
        try:
            from pdf_processing_system import PDFProcessor
            processor = PDFProcessor(output_dir)

            pdf_doc = processor.process_pdf_file(file_path)
            chunks = processor.chunk_document(pdf_doc)
            training_entries = processor.create_training_entries(chunks)
            saved_files = processor.save_processed_data()  # return value unused

            print(f"\nPDF Processing Results:")
            print(f" Document: {pdf_doc.filename}")
            print(f" Pages: {pdf_doc.page_count}")
            print(f" Characters: {len(pdf_doc.text_content)}")
            print(f" Chunks Created: {len(chunks)}")
            print(f" Training Entries: {len(training_entries)}")
            print(f" Files saved to: {output_dir}")

        except Exception as e:
            print(f"PDF processing failed: {e}")
|
|
|
|
|
def _cmd_train(self, args: List[str]):
    """Training entry point: currently only prints usage guidance."""
    first = "Training system requires configuration file"
    usage = "Usage: train --config CONFIG_FILE [--data DATA_PATH]"
    hint = "Create a training configuration first!"

    if RICH_AVAILABLE:
        self.console.print(f"[yellow]{first}[/yellow]")
        self.console.print(usage)
        self.console.print(hint)
    else:
        print(first)
        print(usage)
        print(hint)
|
|
|
|
|
def _cmd_benchmark(self, args: List[str]):
    """Run benchmark comparisons.

    Delegates to the standalone working_demo.py script in the CWD.
    NOTE(review): the advertised --models/--quick flags are not parsed
    here — confirm intended behavior.
    """
    if RICH_AVAILABLE:
        self.console.print("[green]🚀 Running LiMp benchmark comparison...[/green]")

        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Running benchmarks...", total=None)

            try:
                import subprocess
                result = subprocess.run([sys.executable, "working_demo.py"],
                                        capture_output=True, text=True, timeout=60)

                progress.stop()

                if result.returncode == 0:
                    self.console.print("[green]✅ Benchmark completed successfully![/green]")
                    self.console.print("Check 'working_demo_results.json' for detailed results.")
                else:
                    self.console.print(f"[red]Benchmark failed: {result.stderr}[/red]")

            except Exception as e:
                progress.stop()
                self.console.print(f"[red]Benchmark failed: {e}[/red]")
    else:
        # NOTE(review): this path prints the results hint without actually
        # running the benchmark subprocess — confirm whether intended.
        print("🚀 Running LiMp benchmark comparison...")
        print("Check 'working_demo_results.json' for detailed results.")
|
|
|
|
|
def _cmd_demo(self, args: List[str]):
    """Run demonstration of LiMp capabilities.

    Delegates to working_demo.py, the same script the benchmark command
    runs. NOTE(review): the advertised --type flag is not parsed.
    """
    if RICH_AVAILABLE:
        self.console.print("[bold blue]🎬 LiMp Capabilities Demo[/bold blue]")
        self.console.print("Running comprehensive demonstration...")

        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Running demo...", total=None)

            try:
                import subprocess
                result = subprocess.run([sys.executable, "working_demo.py"],
                                        capture_output=True, text=True, timeout=60)

                progress.stop()

                if result.returncode == 0:
                    self.console.print("[green]✅ Demo completed successfully![/green]")
                    self.console.print("Check the generated files for results.")
                else:
                    self.console.print(f"[red]Demo failed: {result.stderr}[/red]")

            except Exception as e:
                progress.stop()
                self.console.print(f"[red]Demo failed: {e}[/red]")
    else:
        # NOTE(review): this path only prints headers and never runs the
        # demo subprocess — confirm whether intended.
        print("🎬 LiMp Capabilities Demo")
        print("Running comprehensive demonstration...")
|
|
|
|
|
def _cmd_load_model(self, args: List[str]):
    """Record a model as loaded (demonstration stub; no real weights)."""
    if not args:
        # Usage error: a model name is required.
        if RICH_AVAILABLE:
            self.console.print("[red]Please provide a model name[/red]")
            self.console.print("Usage: load_model <model_name> [--device DEVICE]")
        else:
            print("Please provide a model name")
            print("Usage: load_model <model_name> [--device DEVICE]")
        return

    model_name = args[0]
    device = "auto"  # parsed but currently unused by the stub
    if "--device" in args:
        flag_pos = args.index("--device")
        if flag_pos + 1 < len(args):
            device = args[flag_pos + 1]

    note = "Note: This is a demonstration. In production, this would load the actual model."
    if RICH_AVAILABLE:
        self.console.print(f"[yellow]Loading model: {model_name}[/yellow]")
        self.console.print(note)
        self.session_data["models_loaded"].append(model_name)
        self.console.print(f"[green]✅ Model {model_name} loaded successfully![/green]")
    else:
        print(f"Loading model: {model_name}")
        print(note)
        self.session_data["models_loaded"].append(model_name)
        print(f"✅ Model {model_name} loaded successfully!")
|
|
|
|
|
def _cmd_generate(self, args: List[str]):
    """Generate text using loaded models.

    NOTE(review): everything after the command is joined into the
    prompt, so the advertised --model/--max-length flags would be
    swallowed into the prompt text — confirm intent. Responses come
    from _generate_mock_response; the sleep simulates latency.
    """
    if not args:
        # Usage error: a prompt is required.
        if RICH_AVAILABLE:
            self.console.print("[red]Please provide a prompt[/red]")
            self.console.print("Usage: generate <prompt> [--model MODEL] [--max-length LENGTH]")
        else:
            print("Please provide a prompt")
            print("Usage: generate <prompt> [--model MODEL] [--max-length LENGTH]")
        return

    prompt = " ".join(args)

    if RICH_AVAILABLE:
        self.console.print(f"[bold blue]Generating response for:[/bold blue] {prompt}")

        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Generating through LiMp pipeline...", total=None)

            import time
            time.sleep(2)  # simulated generation latency

            progress.stop()

        response = self._generate_mock_response(prompt)
        self.console.print(f"[green]Generated:[/green] {response}")
    else:
        print(f"Generating response for: {prompt}")
        print("Generating through LiMp pipeline...")
        import time
        time.sleep(2)  # simulated generation latency

        response = self._generate_mock_response(prompt)
        print(f"Generated: {response}")
|
|
|
|
|
def _cmd_analyze(self, args: List[str]):
    """Analyze text with dimensional features.

    NOTE(review): the reported metric values below are hard-coded
    placeholders — no actual analysis of *text* is performed, and the
    advertised --features flag is not parsed.
    """
    if not args:
        # Usage error: input text is required.
        if RICH_AVAILABLE:
            self.console.print("[red]Please provide text to analyze[/red]")
            self.console.print("Usage: analyze <text> [--features FEATURE1,FEATURE2]")
        else:
            print("Please provide text to analyze")
            print("Usage: analyze <text> [--features FEATURE1,FEATURE2]")
        return

    text = " ".join(args)

    if RICH_AVAILABLE:
        self.console.print(f"[bold blue]Analyzing text with dimensional features...[/bold blue]")

        with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
            task = progress.add_task("Running dimensional analysis...", total=None)

            import time
            time.sleep(1)  # simulated analysis delay

            progress.stop()

            # Placeholder demo metrics (fixed values, see NOTE above).
            analysis_table = Table(title="Dimensional Analysis Results", box=box.ROUNDED)
            analysis_table.add_column("Feature", style="cyan")
            analysis_table.add_column("Value", style="green")

            analysis_table.add_row("Dimensional Coherence", "0.847")
            analysis_table.add_row("Emergence Level", "High")
            analysis_table.add_row("Quantum Enhancement", "0.723")
            analysis_table.add_row("Stability Score", "0.891")
            analysis_table.add_row("Entropy Score", "0.654")
            analysis_table.add_row("Semantic Density", "0.782")

            self.console.print(analysis_table)
    else:
        print("Analyzing text with dimensional features...")
        print("Running dimensional analysis...")
        import time
        time.sleep(1)  # simulated analysis delay

        # Placeholder demo metrics (fixed values, see NOTE above).
        print("\nDimensional Analysis Results:")
        print(" Dimensional Coherence: 0.847")
        print(" Emergence Level: High")
        print(" Quantum Enhancement: 0.723")
        print(" Stability Score: 0.891")
        print(" Entropy Score: 0.654")
        print(" Semantic Density: 0.782")
|
|
|
|
|
def _cmd_visualize(self, args: List[str]):
    """Create visualizations.

    Delegates to the simple_visualization.py script in the CWD; the
    advertised --type/--input flags are not parsed here.
    """
    if RICH_AVAILABLE:
        self.console.print("[green]📊 Creating visualizations...[/green]")

        try:
            import subprocess
            result = subprocess.run([sys.executable, "simple_visualization.py"],
                                    capture_output=True, text=True, timeout=30)

            if result.returncode == 0:
                self.console.print("[green]✅ Visualizations created successfully![/green]")
                self.console.print("Check 'benchmark_report.md' for the report.")
            else:
                self.console.print(f"[red]Visualization failed: {result.stderr}[/red]")

        except Exception as e:
            self.console.print(f"[red]Visualization failed: {e}[/red]")
    else:
        # NOTE(review): this path reports success without running the
        # visualization script at all — confirm whether intended.
        print("📊 Creating visualizations...")
        print("✅ Visualizations created successfully!")
        print("Check 'benchmark_report.md' for the report.")
|
|
|
|
|
def _cmd_export(self, args: List[str]):
    """Export results and model cards.

    Scans the working directory for known result artifacts and lists the
    ones that exist. Fix: the filesystem check now runs in BOTH rich and
    plain-console modes — the original plain-console branch reported
    success without looking at the filesystem.

    Args:
        args: Command-line tokens after the command name (unused here).
    """
    # Known artifacts produced by other commands / scripts.
    files_to_check = [
        "working_demo_results.json",
        "benchmark_report.md",
        "hardware_analysis_report.json",
        "comprehensive_benchmark_results.json",
    ]
    export_files = [f for f in files_to_check if Path(f).exists()]

    if RICH_AVAILABLE:
        self.console.print("[green]📤 Exporting results...[/green]")

        if export_files:
            export_table = Table(title="Exportable Files", box=box.ROUNDED)
            export_table.add_column("File", style="cyan")
            export_table.add_column("Size", style="green")

            for file_path in export_files:
                size = Path(file_path).stat().st_size
                export_table.add_row(file_path, f"{size} bytes")

            self.console.print(export_table)
            self.console.print(f"[green]✅ Found {len(export_files)} files ready for export![/green]")
        else:
            self.console.print("[yellow]No files available for export yet.[/yellow]")
            self.console.print("Run some commands first to generate results!")
    else:
        print("📤 Exporting results...")
        if export_files:
            print(f"✅ Found {len(export_files)} files ready for export!")
            for file_path in export_files:
                print(f"  {file_path} ({Path(file_path).stat().st_size} bytes)")
        else:
            print("No files available for export yet.")
            print("Run some commands first to generate results!")
|
|
|
|
|
def _cmd_clear(self, args: List[str]):
    """Clear the terminal and redraw the welcome banner.

    Args:
        args: Command-line tokens after the command name (unused here).
    """
    if RICH_AVAILABLE:
        self.console.clear()
    else:
        # Fall back to the platform-appropriate shell clear command.
        os.system('cls' if os.name == 'nt' else 'clear')
    self._display_welcome()
|
|
|
|
|
def _cmd_exit(self, args: List[str]):
    """Print a farewell with session statistics and stop the main loop.

    Args:
        args: Command-line tokens after the command name (unused here).
    """
    started = datetime.fromisoformat(self.session_data['start_time'])
    elapsed = (datetime.now() - started).total_seconds()

    # Summary lines are identical in both output modes; build them once.
    summary = [
        "Session summary:",
        f" Commands run: {self.session_data['commands_run']}",
        f" Models loaded: {len(self.session_data['models_loaded'])}",
        f" Session duration: {elapsed:.1f} seconds",
    ]

    if RICH_AVAILABLE:
        self.console.print("[bold green]👋 Thank you for using LiMp![/bold green]")
        for line in summary:
            self.console.print(line)
    else:
        print("👋 Thank you for using LiMp!")
        for line in summary:
            print(line)

    # Signal the interactive loop to terminate.
    self.running = False
|
|
|
|
|
def main():
    """Entry point: parse CLI options and launch the LiMp interface.

    Options:
        --no-rich  Disable rich formatting (forces the plain-console paths).
        --demo     Run working_demo.py in a subprocess, report, and exit.
    """
    parser = argparse.ArgumentParser(description="LiMp Pipeline Interface")
    parser.add_argument("--no-rich", action="store_true", help="Disable rich formatting")
    parser.add_argument("--demo", action="store_true", help="Run in demo mode")

    args = parser.parse_args()

    # Fix: honor --no-rich — the flag was previously parsed but never
    # applied. All output paths branch on the module-level RICH_AVAILABLE
    # flag, so clearing it forces plain-console formatting everywhere.
    if args.no_rich:
        global RICH_AVAILABLE
        RICH_AVAILABLE = False

    if args.demo:
        print("🎬 Running LiMp Demo Mode")
        print("=" * 50)

        try:
            import subprocess
            # Bounded by a timeout so a hung demo cannot block exit.
            result = subprocess.run([sys.executable, "working_demo.py"],
                                    capture_output=True, text=True, timeout=60)

            if result.returncode == 0:
                print("✅ Demo completed successfully!")
                print(result.stdout)
            else:
                print(f"❌ Demo failed: {result.stderr}")
        except Exception as e:
            print(f"❌ Demo failed: {e}")

        return

    interface = LiMpInterface()
    interface.run()
|
|
|
|
|
# Launch the CLI only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
|
|