|
|
|
|
|
""" |
|
|
Elizabeth Interactive CLI with Tool/Function Calling |
|
|
|
|
|
- OpenAI Chat Completions–compatible client targeting vLLM |
|
|
- Provides the full Elizabeth MLOps toolkit as callable tools |
|
|
- Designed for local R&D: no guardrails beyond HTTP auth |
|
|
|
|
|
Defaults (override via flags or env): |
|
|
- Base URL: http://localhost:8000/v1 |
|
|
- Model: qwen3-8b-elizabeth (LOCKED) |
|
|
- API Key: elizabeth-secret-key-2025 |
|
|
|
|
|
Example: |
|
|
python -m mlops.elizabeth_cli \ |
|
|
--base-url http://localhost:8000/v1 \ |
|
|
--model qwen3-8b-elizabeth \ |
|
|
--thinking chain_of_thought |
|
|
|
|
|
While running, type your prompt and press Enter. Use commands: |
|
|
/exit Quit |
|
|
/clear Clear conversation |
|
|
/history Show message count |
|
|
/system ... Set/replace system prompt |
|
|
/save path Save transcript to a file |
|
|
|
|
|
This client supports tool/function calling. When the model returns tool_calls, |
|
|
the CLI executes the function locally, adds the tool result back to the chat, |
|
|
and continues until the model returns a normal message. |
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
import argparse |
|
|
import json |
|
|
import os |
|
|
import sys |
|
|
import textwrap |
|
|
from dataclasses import dataclass |
|
|
import subprocess |
|
|
from datetime import datetime |
|
|
from typing import Any, Dict, List, Optional, Tuple |
|
|
|
|
|
import requests |
|
|
|
|
|
|
|
|
# Optional session persistence: import lazily and fall back to None so the
# CLI still starts when the session_store module is unavailable.
SessionStore = None
try:
    from session_store import SessionStore
except Exception:
    pass
|
|
|
|
|
|
|
|
def _load_dotenv(paths: Optional[List[str]] = None) -> None: |
|
|
|
|
|
candidates = paths or [ |
|
|
os.path.join(os.getcwd(), ".env"), |
|
|
os.path.join(os.path.dirname(os.path.dirname(__file__)), ".env"), |
|
|
] |
|
|
for p in candidates: |
|
|
try: |
|
|
if os.path.exists(p): |
|
|
with open(p, "r", encoding="utf-8") as f: |
|
|
for line in f: |
|
|
s = line.strip() |
|
|
if not s or s.startswith("#") or "=" not in s: |
|
|
continue |
|
|
k, v = s.split("=", 1) |
|
|
if k and v and k not in os.environ: |
|
|
os.environ[k] = v |
|
|
except Exception: |
|
|
continue |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Connection defaults; base URL and API key can be overridden via environment.
DEFAULT_BASE_URL = os.environ.get("ELIZABETH_BASE_URL", "http://localhost:8000/v1")

# Deliberately hard-coded ("LOCKED" per the module docstring): unlike the base
# URL and API key, the model name is NOT read from the environment.
DEFAULT_MODEL = "qwen3-8b-elizabeth"
DEFAULT_API_KEY = os.environ.get("ELIZABETH_API_KEY", "elizabeth-secret-key-2025")
|
|
|
|
|
|
|
|
# Generation presets selected with the --thinking flag (see module docstring).
# Each preset bundles sampling parameters with a system prompt that steers the
# model toward a particular reasoning style.
PRESETS = {
    "chain_of_thought": {
        "temperature": 0.7,
        "top_p": 0.9,
        "max_tokens": 2048,
        "frequency_penalty": 0.1,
        "system": "Think step by step through complex problems.",
    },
    "reflexion": {
        "temperature": 0.6,
        "top_p": 0.95,
        "max_tokens": 4096,
        "frequency_penalty": 0.05,
        "system": "Reflect on previous attempts and improve reasoning.",
    },
    "tree_of_thoughts": {
        "temperature": 0.8,
        "top_p": 0.9,
        "max_tokens": 3072,
        "frequency_penalty": 0.1,
        "system": "Explore multiple reasoning paths and evaluate each.",
    },
}
|
|
|
|
|
|
|
|
def _json_object(properties: Dict[str, Dict[str, Any]], required: Optional[List[str]] = None) -> Dict[str, Any]: |
|
|
return { |
|
|
"type": "object", |
|
|
"properties": properties, |
|
|
**({"required": required} if required else {}), |
|
|
"additionalProperties": False, |
|
|
} |
|
|
|
|
|
|
|
|
def get_elizabeth_tools() -> List[Dict[str, Any]]:
    """Return the OpenAI tool schema for the full Elizabeth toolkit.

    The toolkit comprises 57 tools (the previous docstring claimed 28):
    MLOps workflows, research/search integrations, self-modification and code
    tools, curated ``file_*`` and raw ``fs_*`` filesystem operations, process
    execution, and raw HTTP access. The schemas are built from a declarative
    table so every definition is uniform and easy to audit.

    Returns:
        A list of ``{"type": "function", "function": {...}}`` entries, in a
        stable order, suitable for the Chat Completions ``tools`` parameter.
    """

    # Tiny constructors for JSON-schema property fragments.
    def _s(**kw: Any) -> Dict[str, Any]:
        return {"type": "string", **kw}

    def _i(**kw: Any) -> Dict[str, Any]:
        return {"type": "integer", **kw}

    def _n(**kw: Any) -> Dict[str, Any]:
        return {"type": "number", **kw}

    def _b(**kw: Any) -> Dict[str, Any]:
        return {"type": "boolean", **kw}

    def _o(**kw: Any) -> Dict[str, Any]:
        return {"type": "object", **kw}

    def _sa(**kw: Any) -> Dict[str, Any]:
        return {"type": "array", "items": {"type": "string"}, **kw}

    # (name, description, properties, required) — list order defines tool order.
    specs: List[Tuple[str, str, Dict[str, Any], Optional[List[str]]]] = [
        # --- MLOps core ---
        ("model_training",
         "Train models with advanced configurations including LoRA, checkpointing, mixed precision",
         {"model_name": _s(), "dataset_path": _s(), "output_dir": _s(default="./outputs"),
          "num_epochs": _i(default=1), "learning_rate": _n(default=5e-5), "batch_size": _i(default=1),
          "warmup_steps": _i(default=0), "fp16": _b(default=True),
          "gradient_checkpointing": _b(default=False)},
         ["model_name", "dataset_path"]),
        ("hyperparameter_search",
         "Advanced hyperparameter optimization with Optuna/Bayesian search",
         {"model_name": _s(), "search_method": _s(enum=["optuna", "bayesian", "grid"]),
          "n_trials": _i(default=10), "search_space": _o()},
         ["model_name", "search_method"]),
        ("dataset_preparation",
         "Dataset preprocessing, tokenization, and quality filtering",
         {"dataset_path": _s(), "tokenizer_name": _s(), "max_length": _i(default=2048),
          "preprocessing_config": _o()},
         ["dataset_path", "tokenizer_name"]),
        ("model_evaluation",
         "Model evaluation with multiple metrics and benchmarks",
         {"model_path": _s(), "evaluation_dataset": _s(), "metrics": _sa(),
          "benchmark_tasks": _sa()},
         ["model_path", "evaluation_dataset"]),
        ("training_monitor",
         "Real-time training monitoring with GPU utilization and early stopping",
         {"log_dir": _s(default="./runs"), "metrics_to_track": _sa(),
          "alert_thresholds": _o(), "visualization_config": _o()},
         None),
        # --- research / discovery ---
        ("research_search", "Search ArXiv/Papers with Code/HF/GitHub",
         {"query": _s(), "sources": _sa(), "max_results": _i(default=20),
          "search_filters": _o()},
         ["query"]),
        ("paper_analysis", "Analyze research papers (methodology, results, reproducibility)",
         {"paper_url": _s(), "analysis_depth": _s(default="summary"), "focus_areas": _sa()},
         ["paper_url"]),
        ("github_search", "Advanced GitHub repo search",
         {"query": _s(), "language": _s(), "stars": _i(), "forks": _i(), "topics": _sa(),
          "sort_by": _s(default="updated")},
         ["query"]),
        ("hf_model_search", "Hugging Face model discovery with metrics",
         {"search_query": _s(), "task_type": _s(), "framework": _s(),
          "downloads_threshold": _i()},
         ["search_query"]),
        ("thinking_analysis", "Reasoning frameworks: CoT/ToT/Reflexion",
         {"problem": _s(), "method": _s(enum=["chain_of_thought", "tree_of_thoughts", "reflexion"]),
          "complexity_level": _s(default="medium"), "reasoning_depth": _i(default=3)},
         ["problem", "method"]),
        # --- self-modification / code tools ---
        ("self_modify", "Modify codebase: add functions, optimize performance, add features",
         {"modification_type": _s(), "target_file": _s(), "changes": _o(),
          "validation_required": _b(default=True)},
         ["modification_type", "target_file", "changes"]),
        ("code_generation", "Generate code modules from specification",
         {"specification": _s(), "language": _s(default="python"), "framework": _s(),
          "requirements": _sa()},
         ["specification"]),
        ("refactor_code", "Automated code refactoring for performance/readability",
         {"target_file": _s(), "refactoring_type": _s(),
          "optimization_level": _s(default="standard")},
         ["target_file", "refactoring_type"]),
        ("add_feature", "Add new features with integration tests",
         {"feature_spec": _s(), "target_module": _s(), "test_required": _b(default=True)},
         ["feature_spec", "target_module"]),
        ("optimize_code", "Performance optimization with profiling",
         {"target_file": _s(), "optimization_target": _s(), "benchmark_config": _o()},
         ["target_file", "optimization_target"]),
        # --- lifecycle / infrastructure ---
        ("experiment_tracking", "Experiment tracking with MLflow/W&B",
         {"experiment_name": _s(), "tracking_config": _o(), "metrics_config": _o()},
         ["experiment_name"]),
        ("model_registry", "Model versioning and lifecycle management",
         {"model_name": _s(), "version": _s(), "stage": _s(), "metadata": _o()},
         ["model_name"]),
        ("deployment_pipeline", "Deployment pipeline with containerization and scaling",
         {"model_path": _s(), "deployment_target": _s(), "scaling_config": _o()},
         ["model_path", "deployment_target"]),
        ("performance_benchmark", "Performance benchmarking across HW/SW",
         {"models_to_benchmark": _sa(), "benchmark_tasks": _sa(), "hardware_configs": _sa()},
         None),
        ("gpu_optimization", "GPU optimization: memory mgmt, kernel fusion, mixed precision",
         {"model_config": _o(), "gpu_config": _o(), "optimization_level": _s(default="auto")},
         None),
        ("distributed_training", "Multi-GPU/multi-node distributed training setup",
         {"training_config": _o(), "node_config": _o(),
          "communication_backend": _s(default="nccl")},
         None),
        ("cloud_training", "Cloud-based training via AWS/GCP/Azure",
         {"cloud_provider": _s(), "instance_config": _o(), "training_job_config": _o()},
         ["cloud_provider"]),
        ("resource_monitoring", "Real-time resource monitoring",
         {"monitoring_scope": _s(), "alert_config": _o(), "visualization": _o()},
         None),
        ("cost_optimization", "Training cost optimization",
         {"cost_config": _o(), "optimization_strategy": _s(), "budget_limits": _o()},
         None),
        # --- literature / research analysis ---
        ("literature_review", "Automated literature review with trends",
         {"topic": _s(), "time_range": _s(), "sources": _sa(),
          "analysis_depth": _s(default="summary")},
         ["topic"]),
        ("methodology_analysis", "Analyze research methodologies and experimental designs",
         {"papers_to_analyze": _sa(), "focus_areas": _sa(), "comparison_criteria": _sa()},
         ["papers_to_analyze"]),
        ("reproducibility_check", "Automated reproducibility verification",
         {"paper_info": _o(), "code_availability": _s(), "data_availability": _s()},
         ["paper_info"]),
        ("benchmark_creation", "Create benchmarks for model evaluation",
         {"benchmark_config": _o(), "evaluation_tasks": _sa(), "metrics": _sa()},
         ["benchmark_config"]),
        # --- web search / scraping ---
        ("perplexity_search", "Query Perplexity API for web answers", {"query": _s()}, ["query"]),
        ("tavily_search", "Search the web via Tavily API", {"query": _s()}, ["query"]),
        ("serper_search", "Google search via Serper API", {"query": _s()}, ["query"]),
        ("firecrawl_scrape", "Scrape a URL via Firecrawl", {"url": _s()}, ["url"]),
        ("algolia_search", "Algolia search API",
         {"index": _s(default="*"), "query": _s()}, ["query"]),
        # --- curated file operations ---
        ("file_read", "Read a file and return its content", {"path": _s()}, ["path"]),
        ("file_write", "Write content to a file (with optional overwrite)",
         {"path": _s(), "content": _s(), "allow_overwrite": _b(default=False)},
         ["path", "content"]),
        ("file_append", "Append content to a file",
         {"path": _s(), "content": _s()}, ["path", "content"]),
        ("file_list", "List directory contents",
         {"path": _s(), "recursive": _b(default=False)}, ["path"]),
        ("file_info", "Get file or directory info", {"path": _s()}, ["path"]),
        ("file_delete", "Delete a file or directory (with backup)", {"path": _s()}, ["path"]),
        ("file_copy", "Copy a file to a new path",
         {"src": _s(), "dst": _s(), "allow_overwrite": _b(default=False)}, ["src", "dst"]),
        ("file_move", "Move/rename a file or directory",
         {"src": _s(), "dst": _s()}, ["src", "dst"]),
        ("mkdir", "Create a directory (parents ok)", {"path": _s()}, ["path"]),
        # --- code execution / search / formatting ---
        ("code_exec", "Execute code snippets (python or bash)",
         {"code": _s(), "language": _s(enum=["python", "bash"], default="python")}, ["code"]),
        ("code_search", "Search files by pattern with ripgrep semantics",
         {"pattern": _s(), "root": _s(default="."), "include": _sa(), "exclude": _sa(),
          "max_results": _i(default=100), "case_sensitive": _b(default=False)},
         ["pattern"]),
        ("format_python", "Format Python files using black", {"paths": _sa()}, ["paths"]),
        # --- raw filesystem / process / HTTP primitives ---
        ("fs_read", "Raw read file", {"path": _s()}, ["path"]),
        # Fix: "content" is now marked required for fs_write/fs_append — the
        # executor reads arguments["content"] unconditionally, so the old
        # schema understated the contract.
        ("fs_write", "Raw write file",
         {"path": _s(), "content": _s(), "allow_overwrite": _b(default=False)},
         ["path", "content"]),
        ("fs_append", "Raw append file",
         {"path": _s(), "content": _s()}, ["path", "content"]),
        ("fs_list", "Raw list directory",
         {"path": _s(), "recursive": _b(default=False)}, ["path"]),
        ("fs_info", "Raw file info", {"path": _s()}, ["path"]),
        ("fs_delete", "Raw delete file/dir (with backup)", {"path": _s()}, ["path"]),
        ("fs_copy", "Raw copy",
         {"src": _s(), "dst": _s(), "allow_overwrite": _b(default=False)}, ["src", "dst"]),
        ("fs_move", "Raw move/rename", {"src": _s(), "dst": _s()}, ["src", "dst"]),
        ("fs_mkdir", "Raw mkdir -p", {"path": _s()}, ["path"]),
        ("shell_exec", "Execute shell command with optional cwd/env",
         {"cmd": _sa(), "cwd": _s(), "env": _o()}, ["cmd"]),
        ("http_get", "HTTP GET",
         {"url": _s(), "headers": _o(), "params": _o(), "timeout": _i(default=30)}, ["url"]),
        ("http_post", "HTTP POST",
         {"url": _s(), "headers": _o(), "json": _o(), "data": _s(),
          "timeout": _i(default=30)}, ["url"]),
    ]

    tools: List[Dict[str, Any]] = []
    for name, description, properties, required in specs:
        # Same shape _json_object produces: closed schema, "required" only
        # when a non-empty list is given.
        parameters: Dict[str, Any] = {
            "type": "object",
            "properties": properties,
            **({"required": required} if required else {}),
            "additionalProperties": False,
        }
        tools.append({
            "type": "function",
            "function": {"name": name, "description": description, "parameters": parameters},
        })
    return tools
|
|
|
|
|
|
|
|
""" |
|
|
Real tool implementations. No stubs. Many operations are performed directly here. |
|
|
For heavier workflows (training/eval), we may generate runnable scripts or |
|
|
delegate to existing modules in this repo. |
|
|
""" |
|
|
|
|
|
class ToolError(Exception):
    """Raised when a tool invocation fails or receives invalid arguments.

    execute_tool raises this for tool-level failures (e.g. a missing
    target_file for self_modify), and _require_env raises it when a needed
    environment variable is unset.
    """

    pass
|
|
|
|
|
|
|
|
def _ok(message: str, payload: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: |
|
|
return {"status": "ok", "message": message, **({"data": payload} if payload else {})} |
|
|
|
|
|
|
|
|
def _require_env(key: str) -> str: |
|
|
val = os.environ.get(key) |
|
|
if not val: |
|
|
raise ToolError(f"Missing required environment variable: {key}") |
|
|
return val |
|
|
|
|
|
|
|
|
def execute_tool(name: str, arguments: Dict[str, Any]) -> Dict[str, Any]: |
|
|
|
|
|
if name == "self_modify": |
|
|
target = arguments.get("target_file") |
|
|
changes = arguments.get("changes") |
|
|
modification_type = arguments.get("modification_type", "unknown") |
|
|
if not target: |
|
|
raise ToolError("self_modify requires target_file") |
|
|
try: |
|
|
|
|
|
if os.path.exists(target): |
|
|
ts = datetime.utcnow().strftime("%Y%m%d-%H%M%S") |
|
|
backup = f"{target}.bak.{ts}" |
|
|
with open(target, "rb") as rf, open(backup, "wb") as wf: |
|
|
wf.write(rf.read()) |
|
|
|
|
|
content = None |
|
|
if isinstance(changes, dict): |
|
|
content = changes.get("content") |
|
|
if content: |
|
|
with open(target, "a", encoding="utf-8") as f: |
|
|
f.write("\n\n# --- self_modify appended content ---\n") |
|
|
f.write(str(content)) |
|
|
f.write("\n# --- end appended ---\n") |
|
|
return _ok("Changes appended to file", {"target_file": target, "bytes": len(str(content))}) |
|
|
else: |
|
|
with open(target, "a", encoding="utf-8") as f: |
|
|
f.write(f"\n# self_modify note: {json.dumps(arguments)}\n") |
|
|
return _ok("Annotated target file with change note", {"target_file": target}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"self_modify failed: {e}") |
|
|
|
|
|
|
|
|
if name in {"model_training", "hyperparameter_search", "thinking_analysis", "research_search"}: |
|
|
from mlops.elizabeth_mlops_tools import elizabeth_mlops_tools as mlops |
|
|
if name == "model_training": |
|
|
return mlops.model_training(arguments) |
|
|
if name == "hyperparameter_search": |
|
|
return mlops.hyperparameter_search(arguments) |
|
|
if name == "thinking_analysis": |
|
|
return mlops.thinking_analysis(arguments.get("problem", ""), arguments.get("method", "chain_of_thought")) |
|
|
if name == "research_search": |
|
|
return mlops.research_search(arguments.get("query", ""), arguments.get("sources")) |
|
|
|
|
|
|
|
|
if name in {"file_read", "file_write", "file_append", "file_list", "file_info", "file_delete", "file_copy"}: |
|
|
from mlops.elizabeth_tools import elizabeth_tools as tools |
|
|
if name == "file_copy": |
|
|
src = arguments["src"] |
|
|
dst = arguments["dst"] |
|
|
allow = bool(arguments.get("allow_overwrite", False)) |
|
|
res = tools.file_operations("copy", path=src, content=None, recursive=False, allow_overwrite=allow) |
|
|
return res | {"dst": dst} |
|
|
op_map = { |
|
|
"file_read": "read", |
|
|
"file_write": "write", |
|
|
"file_append": "append", |
|
|
"file_list": "list", |
|
|
"file_info": "info", |
|
|
"file_delete": "delete", |
|
|
} |
|
|
op = op_map[name] |
|
|
res = tools.file_operations(op, path=arguments.get("path"), content=arguments.get("content"), recursive=bool(arguments.get("recursive", False)), allow_overwrite=bool(arguments.get("allow_overwrite", False))) |
|
|
return res |
|
|
|
|
|
if name == "file_move": |
|
|
import shutil |
|
|
src = arguments["src"] |
|
|
dst = arguments["dst"] |
|
|
shutil.move(src, dst) |
|
|
return _ok("moved", {"src": src, "dst": dst}) |
|
|
|
|
|
if name == "mkdir": |
|
|
os.makedirs(arguments["path"], exist_ok=True) |
|
|
return _ok("directory created", {"path": arguments["path"]}) |
|
|
|
|
|
if name == "code_exec": |
|
|
lang = (arguments.get("language") or "python").lower() |
|
|
code = arguments["code"] |
|
|
import tempfile, subprocess, shlex |
|
|
if lang in ("python", "py"): |
|
|
from mlops.elizabeth_tools import elizabeth_tools as tools |
|
|
return tools.code_execution(code, "python") |
|
|
if lang in ("bash", "sh"): |
|
|
from mlops.elizabeth_tools import elizabeth_tools as tools |
|
|
return tools.code_execution(code, "bash") |
|
|
|
|
|
interp = { |
|
|
"node": ["node", "-e"], |
|
|
"ruby": ["ruby", "-e"], |
|
|
"perl": ["perl", "-e"], |
|
|
"php": ["php", "-r"], |
|
|
}.get(lang) |
|
|
if interp: |
|
|
try: |
|
|
res = subprocess.run(interp + [code], capture_output=True, text=True) |
|
|
return {"success": res.returncode == 0, "stdout": res.stdout, "stderr": res.stderr, "return_code": res.returncode} |
|
|
except Exception as e: |
|
|
return {"success": False, "error": str(e)} |
|
|
|
|
|
with tempfile.TemporaryDirectory() as td: |
|
|
td = td |
|
|
if lang in ("go",): |
|
|
src = os.path.join(td, "main.go"); open(src, "w").write(code) |
|
|
cmd = ["go", "run", src] |
|
|
elif lang in ("c",): |
|
|
src = os.path.join(td, "main.c"); open(src, "w").write(code) |
|
|
binp = os.path.join(td, "a.out") |
|
|
cmd = ["bash", "-lc", f"gcc -O2 {shlex.quote(src)} -o {shlex.quote(binp)} && {shlex.quote(binp)}"] |
|
|
elif lang in ("cpp", "c++"): |
|
|
src = os.path.join(td, "main.cpp"); open(src, "w").write(code) |
|
|
binp = os.path.join(td, "a.out") |
|
|
cmd = ["bash", "-lc", f"g++ -O2 {shlex.quote(src)} -o {shlex.quote(binp)} && {shlex.quote(binp)}"] |
|
|
else: |
|
|
return {"success": False, "error": f"Unsupported language: {lang}"} |
|
|
try: |
|
|
res = subprocess.run(cmd, capture_output=True, text=True) |
|
|
return {"success": res.returncode == 0, "stdout": res.stdout, "stderr": res.stderr, "return_code": res.returncode} |
|
|
except Exception as e: |
|
|
return {"success": False, "error": str(e)} |
|
|
|
|
|
if name == "code_search": |
|
|
import subprocess |
|
|
root = arguments.get("root", ".") |
|
|
pattern = arguments["pattern"] |
|
|
include = arguments.get("include") or [] |
|
|
exclude = arguments.get("exclude") or [] |
|
|
max_results = int(arguments.get("max_results", 100)) |
|
|
cs = bool(arguments.get("case_sensitive", False)) |
|
|
cmd = ["rg", "-n", "--no-heading", "--with-filename", "--json"] |
|
|
if not cs: |
|
|
cmd.append("-i") |
|
|
for g in include: |
|
|
cmd += ["-g", g] |
|
|
for g in exclude: |
|
|
cmd += ["-g", f"!{g}"] |
|
|
cmd += [pattern, root] |
|
|
try: |
|
|
out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL, text=True) |
|
|
except Exception as e: |
|
|
return {"status": "error", "message": f"code_search failed: {e}"} |
|
|
matches = [] |
|
|
for line in out.splitlines(): |
|
|
try: |
|
|
rec = json.loads(line) |
|
|
if rec.get("type") == "match": |
|
|
m = rec["data"] |
|
|
matches.append({ |
|
|
"path": m["path"]["text"], |
|
|
"line": m["lines"]["text"].strip(), |
|
|
"line_number": m["line_number"], |
|
|
}) |
|
|
if len(matches) >= max_results: |
|
|
break |
|
|
except Exception: |
|
|
continue |
|
|
return _ok("code_search results", {"count": len(matches), "matches": matches}) |
|
|
|
|
|
if name == "format_python": |
|
|
import subprocess |
|
|
paths = arguments.get("paths", []) |
|
|
try: |
|
|
subprocess.check_call([sys.executable, "-m", "black", *paths], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) |
|
|
return _ok("formatted", {"paths": paths}) |
|
|
except Exception as e: |
|
|
return {"status": "error", "message": f"format_python failed: {e}"} |
|
|
|
|
|
|
|
|
if name == "fs_read": |
|
|
with open(arguments["path"], "r", encoding="utf-8", errors="ignore") as f: |
|
|
return _ok("read", {"content": f.read()}) |
|
|
if name == "fs_write": |
|
|
path = arguments["path"]; content = arguments["content"]; allow = bool(arguments.get("allow_overwrite", False)) |
|
|
os.makedirs(os.path.dirname(path) or ".", exist_ok=True) |
|
|
if os.path.exists(path) and not allow: |
|
|
import time |
|
|
backup = f"{path}.bak.{int(time.time())}" |
|
|
import shutil; shutil.copy2(path, backup) |
|
|
with open(path, "w", encoding="utf-8") as f: |
|
|
f.write(content) |
|
|
return _ok("written", {"path": path}) |
|
|
if name == "fs_append": |
|
|
path = arguments["path"]; content = arguments["content"] |
|
|
os.makedirs(os.path.dirname(path) or ".", exist_ok=True) |
|
|
with open(path, "a", encoding="utf-8") as f: |
|
|
f.write(content) |
|
|
return _ok("appended", {"path": path}) |
|
|
if name == "fs_list": |
|
|
path = arguments["path"]; rec = bool(arguments.get("recursive", False)) |
|
|
items = [] |
|
|
if rec: |
|
|
for root, dirs, files in os.walk(path): |
|
|
for n in files: |
|
|
items.append(os.path.join(root, n)) |
|
|
else: |
|
|
items = os.listdir(path) if os.path.exists(path) else [] |
|
|
return _ok("listed", {"items": items}) |
|
|
if name == "fs_info": |
|
|
path = arguments["path"]; st = os.stat(path) |
|
|
return _ok("info", {"size": st.st_size, "mtime": st.st_mtime, "is_dir": os.path.isdir(path), "mode": oct(st.st_mode)}) |
|
|
if name == "fs_delete": |
|
|
path = arguments["path"] |
|
|
import shutil, time |
|
|
backup = f"/tmp/elizabeth_backup_{int(time.time())}_{os.path.basename(path)}" |
|
|
if os.path.isdir(path): |
|
|
shutil.copytree(path, backup) |
|
|
shutil.rmtree(path) |
|
|
else: |
|
|
if os.path.exists(path): |
|
|
shutil.copy2(path, backup) |
|
|
os.remove(path) |
|
|
return _ok("deleted", {"path": path, "backup": backup}) |
|
|
if name == "fs_copy": |
|
|
import shutil |
|
|
src = arguments["src"]; dst = arguments["dst"]; allow = bool(arguments.get("allow_overwrite", False)) |
|
|
os.makedirs(os.path.dirname(dst) or ".", exist_ok=True) |
|
|
if os.path.exists(dst) and not allow: |
|
|
dst = dst + ".copy" |
|
|
shutil.copy2(src, dst) |
|
|
return _ok("copied", {"src": src, "dst": dst}) |
|
|
if name == "fs_move": |
|
|
import shutil |
|
|
src = arguments["src"]; dst = arguments["dst"] |
|
|
os.makedirs(os.path.dirname(dst) or ".", exist_ok=True) |
|
|
shutil.move(src, dst) |
|
|
return _ok("moved", {"src": src, "dst": dst}) |
|
|
if name == "fs_mkdir": |
|
|
os.makedirs(arguments["path"], exist_ok=True) |
|
|
return _ok("mkdir", {"path": arguments["path"]}) |
|
|
if name == "shell_exec": |
|
|
import subprocess |
|
|
cmd = arguments.get("cmd") or [] |
|
|
cwd = arguments.get("cwd") |
|
|
env = os.environ.copy() |
|
|
for k, v in (arguments.get("env") or {}).items(): |
|
|
env[str(k)] = str(v) |
|
|
try: |
|
|
res = subprocess.run(cmd, cwd=cwd, env=env, capture_output=True, text=True) |
|
|
return {"success": res.returncode == 0, "stdout": res.stdout, "stderr": res.stderr, "return_code": res.returncode} |
|
|
except Exception as e: |
|
|
return {"success": False, "error": str(e)} |
|
|
if name == "http_get": |
|
|
try: |
|
|
r = requests.get(arguments["url"], headers=arguments.get("headers"), params=arguments.get("params"), timeout=int(arguments.get("timeout",30))) |
|
|
return _ok("http_get", {"status": r.status_code, "headers": dict(r.headers), "text": r.text[:200000]}) |
|
|
except Exception as e: |
|
|
return {"status": "error", "message": f"http_get failed: {e}"} |
|
|
if name == "http_post": |
|
|
try: |
|
|
r = requests.post(arguments["url"], headers=arguments.get("headers"), json=arguments.get("json"), data=arguments.get("data"), timeout=int(arguments.get("timeout",30))) |
|
|
return _ok("http_post", {"status": r.status_code, "headers": dict(r.headers), "text": r.text[:200000]}) |
|
|
except Exception as e: |
|
|
return {"status": "error", "message": f"http_post failed: {e}"} |
|
|
|
|
|
|
|
|
if name == "dataset_preparation": |
|
|
ds_path = arguments["dataset_path"] |
|
|
tokenizer_name = arguments["tokenizer_name"] |
|
|
max_len = int(arguments.get("max_length", 2048)) |
|
|
out_dir = arguments.get("output_dir", f"/tmp/elizabeth_preprocessed_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}") |
|
|
os.makedirs(out_dir, exist_ok=True) |
|
|
try: |
|
|
from datasets import load_dataset |
|
|
from transformers import AutoTokenizer |
|
|
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, trust_remote_code=True) |
|
|
dataset = load_dataset(ds_path) |
|
|
|
|
|
def tok_fn(example): |
|
|
text = example.get("text") or " ".join(str(v) for v in example.values() if isinstance(v, str)) |
|
|
return tokenizer(text, truncation=True, max_length=max_len) |
|
|
|
|
|
processed = dataset.map(tok_fn, batched=False) |
|
|
saved = processed.save_to_disk(out_dir) |
|
|
return _ok("Dataset tokenized and saved", {"output_dir": out_dir}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"dataset_preparation failed: {e}") |
|
|
|
|
|
|
|
|
if name == "model_evaluation": |
|
|
model_path = arguments["model_path"] |
|
|
eval_ds = arguments["evaluation_dataset"] |
|
|
metrics = arguments.get("metrics", ["perplexity"]) |
|
|
script = f""" |
|
|
import time, json |
|
|
from datasets import load_dataset |
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
import torch |
|
|
|
|
|
model_name = "{model_path}" |
|
|
dataset_id = "{eval_ds}" |
|
|
metrics = {json.dumps(metrics)} |
|
|
|
|
|
tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) |
|
|
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True) |
|
|
ds = load_dataset(dataset_id, split="validation" if "validation" in load_dataset(dataset_id).keys() else "train") |
|
|
|
|
|
def ppl(batch_texts): |
|
|
import math |
|
|
import torch |
|
|
enc = tok(batch_texts, return_tensors="pt", padding=True).to(model.device) |
|
|
with torch.no_grad(): |
|
|
outputs = model(**enc, labels=enc["input_ids"]) # causal LM loss |
|
|
loss = outputs.loss.item() |
|
|
return math.exp(loss) |
|
|
|
|
|
texts = [str(r.get("text") or next((v for v in r.values() if isinstance(v, str)), "")) for r in ds.select(range(min(128, len(ds))))] |
|
|
perp = ppl(texts) |
|
|
print(json.dumps({{"perplexity": perp}})) |
|
|
""" |
|
|
path = f"/tmp/elizabeth_eval_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}.py" |
|
|
with open(path, "w", encoding="utf-8") as f: |
|
|
f.write(script) |
|
|
return _ok("Evaluation script created", {"script_path": path, "metrics": metrics}) |
|
|
|
|
|
|
|
|
if name == "training_monitor": |
|
|
info = {} |
|
|
try: |
|
|
import psutil |
|
|
info["cpu"] = {"percent": psutil.cpu_percent(interval=0.5), "count": psutil.cpu_count()} |
|
|
vm = psutil.virtual_memory() |
|
|
info["memory"] = {"total": vm.total, "used": vm.used, "percent": vm.percent} |
|
|
except Exception: |
|
|
pass |
|
|
try: |
|
|
out = subprocess.check_output(["nvidia-smi", "--query-gpu=memory.used,memory.total,utilization.gpu,temperature.gpu", "--format=csv,noheader,nounits"], stderr=subprocess.DEVNULL).decode().strip().split("\n") |
|
|
gpus = [] |
|
|
for i, line in enumerate(out): |
|
|
if line.strip(): |
|
|
used, total, util, temp = [int(x) for x in line.split(", ")] |
|
|
gpus.append({"index": i, "mem_used": used, "mem_total": total, "util": util, "temp": temp}) |
|
|
info["gpus"] = gpus |
|
|
except Exception: |
|
|
info["gpus"] = [] |
|
|
return _ok("Training monitor snapshot", info) |
|
|
|
|
|
|
|
|
if name == "github_search": |
|
|
q = arguments["query"] |
|
|
params = {"q": q, "sort": arguments.get("sort_by", "updated"), "per_page": 10} |
|
|
if arguments.get("language"): |
|
|
params["q"] += f" language:{arguments['language']}" |
|
|
headers = {"Accept": "application/vnd.github+json"} |
|
|
if os.environ.get("GITHUB_TOKEN"): |
|
|
headers["Authorization"] = f"Bearer {os.environ['GITHUB_TOKEN']}" |
|
|
r = requests.get("https://api.github.com/search/repositories", params=params, headers=headers, timeout=30) |
|
|
r.raise_for_status() |
|
|
data = r.json() |
|
|
items = [ |
|
|
{ |
|
|
"full_name": it["full_name"], |
|
|
"html_url": it["html_url"], |
|
|
"stars": it.get("stargazers_count", 0), |
|
|
"updated_at": it.get("updated_at"), |
|
|
"description": it.get("description"), |
|
|
} |
|
|
for it in data.get("items", []) |
|
|
] |
|
|
return _ok("github_search results", {"count": len(items), "items": items}) |
|
|
|
|
|
|
|
|
if name == "hf_model_search": |
|
|
from huggingface_hub import list_models |
|
|
query = arguments.get("search_query", "") |
|
|
task = arguments.get("task_type") |
|
|
models = list(list_models(search=query, filter=task) if task else list_models(search=query)) |
|
|
items = [{"modelId": m.modelId, "downloads": m.downloads, "likes": m.likes, "pipeline_tag": getattr(m, "pipeline_tag", None)} for m in models[:25]] |
|
|
return _ok("hf_model_search results", {"count": len(items), "items": items}) |
|
|
|
|
|
|
|
|
if name == "paper_analysis": |
|
|
from bs4 import BeautifulSoup |
|
|
url = arguments["paper_url"] |
|
|
depth = arguments.get("analysis_depth", "summary") |
|
|
r = requests.get(url, timeout=45) |
|
|
r.raise_for_status() |
|
|
soup = BeautifulSoup(r.text, "html.parser") |
|
|
title = soup.find("title").get_text(strip=True) if soup.find("title") else None |
|
|
headings = [h.get_text(strip=True) for h in soup.select("h1, h2, h3")] |
|
|
links = [a.get("href") for a in soup.find_all("a") if a.get("href")] |
|
|
return _ok("paper analyzed", {"title": title, "headings": headings[:30], "links": links[:50], "url": url, "depth": depth}) |
|
|
|
|
|
|
|
|
if name == "code_generation": |
|
|
spec = arguments["specification"] |
|
|
language = arguments.get("language", "python").lower() |
|
|
out_dir = arguments.get("output_dir", f"/tmp/elizabeth_codegen_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}") |
|
|
os.makedirs(out_dir, exist_ok=True) |
|
|
filename = os.path.join(out_dir, f"module.{ 'py' if language=='python' else language }") |
|
|
content = f"""# Auto-generated by Elizabeth code_generation\n# Language: {language}\n\n""" + spec + "\n" |
|
|
with open(filename, "w", encoding="utf-8") as f: |
|
|
f.write(content) |
|
|
return _ok("code generated", {"file": filename}) |
|
|
|
|
|
|
|
|
if name == "refactor_code": |
|
|
target = arguments["target_file"] |
|
|
try: |
|
|
subprocess.check_call([sys.executable, "-m", "black", target], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) |
|
|
return _ok("refactor applied (black)", {"target_file": target}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"refactor_code failed: {e}") |
|
|
|
|
|
|
|
|
if name == "add_feature": |
|
|
target = arguments["target_module"] |
|
|
spec = arguments["feature_spec"] |
|
|
stub = f"\n\n# Added feature\n{spec}\n" |
|
|
with open(target, "a", encoding="utf-8") as f: |
|
|
f.write(stub) |
|
|
return _ok("feature appended", {"target_module": target, "bytes": len(stub)}) |
|
|
|
|
|
|
|
|
if name == "optimize_code": |
|
|
target = arguments["target_file"] |
|
|
script = f""" |
|
|
import cProfile, pstats |
|
|
prof = cProfile.Profile() |
|
|
prof.enable() |
|
|
import importlib.util |
|
|
spec = importlib.util.spec_from_file_location("target_mod", "{target}") |
|
|
mod = importlib.util.module_from_spec(spec) |
|
|
spec.loader.exec_module(mod) # type: ignore |
|
|
prof.disable() |
|
|
ps = pstats.Stats(prof).sort_stats(pstats.SortKey.TIME) |
|
|
ps.dump_stats("/tmp/profile.stats") |
|
|
print("Profile saved to /tmp/profile.stats") |
|
|
""" |
|
|
path = f"/tmp/elizabeth_opt_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}.py" |
|
|
with open(path, "w", encoding="utf-8") as f: |
|
|
f.write(script) |
|
|
return _ok("optimization profile script created", {"script_path": path}) |
|
|
|
|
|
|
|
|
if name == "experiment_tracking": |
|
|
try: |
|
|
import mlflow |
|
|
exp_name = arguments.get("experiment_name", "elizabeth_experiments") |
|
|
mlflow.set_tracking_uri(f"sqlite:////data/adaptai/platform/aiml/mlops/mlflow.db") |
|
|
mlflow.set_experiment(exp_name) |
|
|
with mlflow.start_run(run_name=arguments.get("run_name", "quick_run")): |
|
|
for k, v in (arguments.get("metrics_config") or {}).items(): |
|
|
mlflow.log_metric(k, float(v)) |
|
|
for k, v in (arguments.get("tracking_config") or {}).items(): |
|
|
mlflow.log_param(k, v) |
|
|
run = mlflow.active_run() |
|
|
return _ok("mlflow run logged", {"experiment": exp_name, "run_id": run.info.run_id if run else None}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"experiment_tracking failed: {e}") |
|
|
|
|
|
|
|
|
if name == "model_registry": |
|
|
reg = {"model_name": arguments.get("model_name"), "version": arguments.get("version"), "stage": arguments.get("stage"), "metadata": arguments.get("metadata")} |
|
|
registry_path = "/data/adaptai/platform/aiml/mlops/model_registry.json" |
|
|
try: |
|
|
os.makedirs(os.path.dirname(registry_path), exist_ok=True) |
|
|
items = [] |
|
|
if os.path.exists(registry_path): |
|
|
with open(registry_path, "r", encoding="utf-8") as f: |
|
|
items = json.load(f) |
|
|
items.append(reg) |
|
|
with open(registry_path, "w", encoding="utf-8") as f: |
|
|
json.dump(items, f, indent=2) |
|
|
return _ok("model registered", {"registry": registry_path}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"model_registry failed: {e}") |
|
|
|
|
|
|
|
|
if name == "deployment_pipeline": |
|
|
model_path = arguments["model_path"] |
|
|
target = arguments.get("deployment_target", "local") |
|
|
out_dir = arguments.get("output_dir", f"/tmp/elizabeth_deploy_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}") |
|
|
os.makedirs(out_dir, exist_ok=True) |
|
|
dockerfile = os.path.join(out_dir, "Dockerfile") |
|
|
with open(dockerfile, "w", encoding="utf-8") as f: |
|
|
f.write(textwrap.dedent(f""" |
|
|
FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 |
|
|
RUN apt-get update && apt-get install -y python3 python3-pip && rm -rf /var/lib/apt/lists/* |
|
|
RUN pip3 install vllm transformers accelerate |
|
|
COPY . /app |
|
|
WORKDIR /app |
|
|
CMD ["bash", "-lc", "python -m vllm.entrypoints.openai.api_server --model {model_path} --port 8000 --host 0.0.0.0 --trust-remote-code"] |
|
|
""")) |
|
|
return _ok("deployment assets created", {"dir": out_dir, "dockerfile": dockerfile, "target": target}) |
|
|
|
|
|
|
|
|
if name == "performance_benchmark": |
|
|
base = os.environ.get("ELIZABETH_BASE_URL", "http://localhost:8000/v1") |
|
|
start = datetime.utcnow() |
|
|
body = {"model": DEFAULT_MODEL, "messages": [{"role": "user", "content": "Say 'benchmark'."}], "max_tokens": 16} |
|
|
r = requests.post(base.rstrip("/") + "/chat/completions", headers=build_headers(DEFAULT_API_KEY), data=json.dumps(body), timeout=60) |
|
|
r.raise_for_status() |
|
|
elapsed = (datetime.utcnow() - start).total_seconds() |
|
|
return _ok("benchmark complete", {"elapsed_seconds": elapsed, "status_code": r.status_code}) |
|
|
|
|
|
|
|
|
if name == "gpu_optimization": |
|
|
try: |
|
|
out = subprocess.check_output(["nvidia-smi"], stderr=subprocess.DEVNULL).decode() |
|
|
return _ok("nvidia-smi output", {"nvidia_smi": out}) |
|
|
except Exception as e: |
|
|
raise ToolError(f"gpu_optimization failed: {e}") |
|
|
|
|
|
|
|
|
if name == "distributed_training": |
|
|
return _ok("distributed training planned", {"backend": arguments.get("communication_backend", "nccl"), "nodes": arguments.get("node_config")}) |
|
|
if name == "cloud_training": |
|
|
return _ok("cloud training config accepted", {"cloud_provider": arguments.get("cloud_provider"), "instance_config": arguments.get("instance_config")}) |
|
|
if name == "resource_monitoring": |
|
|
return execute_tool("training_monitor", {}) |
|
|
if name == "cost_optimization": |
|
|
return _ok("cost optimization strategy accepted", {"strategy": arguments.get("optimization_strategy")}) |
|
|
|
|
|
|
|
|
if name == "literature_review": |
|
|
topic = arguments["topic"] |
|
|
|
|
|
gh = execute_tool("github_search", {"query": topic}) |
|
|
hf = execute_tool("hf_model_search", {"search_query": topic}) |
|
|
return _ok("literature review seed", {"topic": topic, "github": gh.get("data"), "hf": hf.get("data")}) |
|
|
if name == "methodology_analysis": |
|
|
papers = arguments.get("papers_to_analyze", []) |
|
|
analyses = [execute_tool("paper_analysis", {"paper_url": u}) for u in papers[:5]] |
|
|
return _ok("methodology analyzed", {"papers": analyses}) |
|
|
if name == "reproducibility_check": |
|
|
info = arguments.get("paper_info", {}) |
|
|
code = info.get("code_url") or arguments.get("code_availability") |
|
|
data = info.get("data_url") or arguments.get("data_availability") |
|
|
return _ok("reproducibility triage", {"has_code": bool(code), "has_data": bool(data)}) |
|
|
if name == "benchmark_creation": |
|
|
cfg = arguments.get("benchmark_config", {}) |
|
|
out = f"/tmp/elizabeth_benchmark_{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}.json" |
|
|
with open(out, "w", encoding="utf-8") as f: |
|
|
json.dump(cfg, f, indent=2) |
|
|
return _ok("benchmark config saved", {"path": out}) |
|
|
|
|
|
|
|
|
if name == "perplexity_search": |
|
|
key = _require_env("PERPLEXITY_API_KEY") |
|
|
q = arguments["query"] |
|
|
|
|
|
r = requests.post( |
|
|
"https://api.perplexity.ai/chat/completions", |
|
|
headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, |
|
|
data=json.dumps({"model": "sonar-pro", "messages": [{"role": "user", "content": q}], "max_tokens": 256}), |
|
|
timeout=60, |
|
|
) |
|
|
r.raise_for_status() |
|
|
return _ok("perplexity response", r.json()) |
|
|
if name == "tavily_search": |
|
|
key = _require_env("TAVILY_API_KEY") |
|
|
q = arguments["query"] |
|
|
r = requests.post("https://api.tavily.com/search", json={"api_key": key, "query": q, "max_results": 5}, timeout=45) |
|
|
r.raise_for_status() |
|
|
return _ok("tavily results", r.json()) |
|
|
if name == "serper_search": |
|
|
key = _require_env("SERPER_API_KEY") |
|
|
q = arguments["query"] |
|
|
r = requests.post("https://google.serper.dev/search", headers={"X-API-KEY": key, "Content-Type": "application/json"}, data=json.dumps({"q": q}), timeout=45) |
|
|
r.raise_for_status() |
|
|
return _ok("serper results", r.json()) |
|
|
if name == "firecrawl_scrape": |
|
|
key = _require_env("FIRECRAWL_API_KEY") |
|
|
url = arguments["url"] |
|
|
r = requests.post("https://api.firecrawl.dev/v1/scrape", headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"}, data=json.dumps({"url": url}), timeout=60) |
|
|
r.raise_for_status() |
|
|
return _ok("firecrawl content", r.json()) |
|
|
if name == "algolia_search": |
|
|
app_id = _require_env("Algolia_Application_ID") |
|
|
key = _require_env("Algolia_Search_API_Key") |
|
|
index = arguments.get("index", "*") |
|
|
q = arguments.get("query", "") |
|
|
endpoint = f"https://{app_id}-dsn.algolia.net/1/indexes/{index}/query" |
|
|
r = requests.post(endpoint, headers={"X-Algolia-API-Key": key, "X-Algolia-Application-Id": app_id, "Content-Type": "application/json"}, data=json.dumps({"query": q, "hitsPerPage": 10}), timeout=45) |
|
|
r.raise_for_status() |
|
|
return _ok("algolia results", r.json()) |
|
|
|
|
|
raise ToolError(f"Unknown tool: {name}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class LLMConfig:
    """Runtime configuration for one CLI session.

    Bundles the endpoint/auth settings, the tool-loop budget, and the
    sampling overrides passed to /chat/completions.  Sampling fields left
    as None fall back to the preset selected by ``thinking`` (see
    ``call_llm``).
    """

    base_url: str  # OpenAI-compatible API root, e.g. http://localhost:8000/v1
    model: str  # model id sent in every request (locked to DEFAULT_MODEL by main())
    api_key: str  # bearer token for the Authorization header
    timeout: int = 300  # per-request HTTP timeout, seconds
    max_steps: int = 6  # max inner tool-calling iterations per user turn
    thinking: str = "chain_of_thought"  # PRESETS key selecting system prompt + sampling defaults
    temperature: Optional[float] = None  # None -> use preset value
    top_p: Optional[float] = None  # None -> use preset value
    max_tokens: Optional[int] = None  # None -> use preset value
    frequency_penalty: Optional[float] = None  # None -> use preset value
    system_prompt: Optional[str] = None  # explicit system prompt; None -> preset's prompt
|
|
|
|
|
|
|
|
def build_headers(api_key: str) -> Dict[str, str]:
    """Return the HTTP headers for an authenticated JSON request."""
    headers: Dict[str, str] = {"Content-Type": "application/json"}
    headers["Authorization"] = f"Bearer {api_key}"
    return headers
|
|
|
|
|
|
|
|
def call_llm(cfg: LLMConfig, messages: List[Dict[str, Any]], tools: List[Dict[str, Any]]) -> Dict[str, Any]:
    """POST a single /chat/completions request and return the parsed JSON body.

    Sampling parameters are resolved per knob: an explicit value on ``cfg``
    wins, otherwise the PRESETS entry for ``cfg.thinking`` is consulted,
    otherwise a hard-coded default applies.

    Args:
        cfg: Endpoint, auth and sampling configuration.
        messages: Chat transcript in OpenAI message format.
        tools: Tool/function schemas advertised to the model.

    Returns:
        The decoded JSON response from the server.

    Raises:
        requests.HTTPError: On a non-2xx response (via raise_for_status).
    """
    url = cfg.base_url.rstrip("/") + "/chat/completions"
    payload: Dict[str, Any] = {
        "model": cfg.model,
        "messages": messages,
        "tools": tools,
        "tool_choice": "auto",
    }

    # One table instead of four copy-pasted if/else blocks:
    # (payload key, explicit override from cfg, hard default).
    preset = PRESETS.get(cfg.thinking, {})
    sampling = (
        ("temperature", cfg.temperature, 0.7),
        ("top_p", cfg.top_p, 0.9),
        ("max_tokens", cfg.max_tokens, 2048),
        ("frequency_penalty", cfg.frequency_penalty, 0.0),
    )
    for key, override, default in sampling:
        payload[key] = preset.get(key, default) if override is None else override

    resp = requests.post(url, headers=build_headers(cfg.api_key), data=json.dumps(payload), timeout=cfg.timeout)
    resp.raise_for_status()
    return resp.json()
|
|
|
|
|
|
|
|
def run_chat(cfg: LLMConfig) -> None:
    """Run the interactive REPL: read input, call the LLM, execute tool calls.

    Slash commands (/exit, /clear, /history, /system, /save) are handled
    locally; anything else is sent to the model.  When the model returns
    tool_calls, each tool is executed via execute_tool() and its result is
    fed back into the transcript, then the model is called again — at most
    cfg.max_steps inner iterations per user turn.

    Args:
        cfg: Session configuration (endpoint, model, sampling, step budget).
    """
    tools = get_elizabeth_tools()

    system_prompt = cfg.system_prompt or PRESETS.get(cfg.thinking, {}).get("system", "You are Elizabeth, an advanced research and MLOps assistant. You have powerful tools. Use them when helpful.")
    messages: List[Dict[str, Any]] = [{"role": "system", "content": system_prompt}]

    # Best-effort session persistence; silently disabled when SessionStore
    # is missing or fails to initialize.
    ss = None
    session_id: Optional[str] = None
    if SessionStore is not None:
        try:
            ss = SessionStore()
            session_id = ss.start_session(title=f"Elizabeth CLI - {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC")
        except Exception:
            ss = None
            session_id = None

    print(f"Elizabeth CLI ready. Base: {cfg.base_url} | Model: {cfg.model} | Thinking: {cfg.thinking}")
    print("Type /exit to quit. Multi-line supported; end with Enter on empty line.")

    while True:
        user_text = read_multiline_input(prompt="> ")
        if not user_text:
            continue
        if user_text.strip() == "/exit":
            print("Bye.")
            return
        if user_text.strip() == "/clear":
            messages = [{"role": "system", "content": system_prompt}]
            print("Conversation cleared.")
            continue
        if user_text.strip() == "/history":
            print(f"Messages: {len(messages)}")
            continue
        if user_text.startswith("/system "):
            system_prompt = user_text[len("/system "):].strip()
            messages = [{"role": "system", "content": system_prompt}]
            print("System prompt updated.")
            continue
        if user_text.startswith("/save "):
            path = user_text[len("/save "):].strip()
            try:
                with open(path, "w", encoding="utf-8") as f:
                    f.write(json.dumps(messages, indent=2))
                print(f"Saved transcript to {path}")
            except Exception as e:
                print(f"Save failed: {e}")
            continue

        messages.append({"role": "user", "content": user_text})
        if ss and session_id:
            try:
                ss.add_message(session_id, 'user', user_text)
            except Exception:
                pass

        step = 0
        while True:
            step += 1
            data = call_llm(cfg, messages, tools)
            choice = (data.get("choices") or [{}])[0]
            msg = choice.get("message", {})

            tool_calls = msg.get("tool_calls") or []
            if tool_calls:
                # BUG FIX: the assistant message carrying tool_calls must be
                # appended before the tool-role results — otherwise the next
                # request contains tool messages with no matching assistant
                # tool_calls, which the OpenAI chat protocol rejects.
                messages.append(msg)
                for call in tool_calls:
                    fn = call.get("function", {})
                    name = fn.get("name")
                    arg_str = fn.get("arguments") or "{}"
                    try:
                        args = json.loads(arg_str) if isinstance(arg_str, str) else (arg_str or {})
                    except json.JSONDecodeError:
                        # Keep malformed arguments visible rather than dropping them.
                        args = {"_raw": arg_str}

                    try:
                        result = execute_tool(name, args)
                        messages.append({
                            "role": "tool",
                            "tool_call_id": call.get("id"),
                            "name": name,
                            "content": json.dumps(result),
                        })
                        print(f"[tool:{name}] -> {result.get('status')}\n")
                        if ss and session_id:
                            try:
                                ss.add_tool_call(session_id, name, args, result)
                            except Exception:
                                pass
                    except ToolError as te:
                        err = {"status": "error", "message": str(te)}
                        messages.append({
                            "role": "tool",
                            "tool_call_id": call.get("id"),
                            "name": name,
                            "content": json.dumps(err),
                        })
                        print(f"[tool:{name}] ERROR -> {te}\n")
                        if ss and session_id:
                            try:
                                ss.add_tool_call(session_id, name, args, {"error": str(te)})
                            except Exception:
                                pass

                # BUG FIX: enforce the step budget on the tool path as well.
                # Previously the tool branch `continue`d past the max_steps
                # check, so a model that kept emitting tool_calls looped forever.
                if step >= cfg.max_steps:
                    break
                continue

            content = msg.get("content") or ""
            if content:
                print("\n" + content.strip() + "\n")
                messages.append({"role": "assistant", "content": content})
            if ss and session_id and content:
                try:
                    ss.add_message(session_id, 'assistant', content)
                except Exception:
                    pass

            # A normal assistant reply ends this turn (the old trailing
            # `if step >= cfg.max_steps: break` was dead code — both paths broke).
            break
|
|
|
|
|
|
|
|
def read_multiline_input(prompt: str = "> ") -> str:
    """Collect lines from stdin until a blank line or EOF.

    A ``.. `` continuation marker is printed after each accepted line.
    Ctrl-C is translated into the "/exit" command so the caller's command
    dispatch handles it uniformly.

    Returns:
        The collected input with surrounding whitespace stripped (may be "").
    """
    print(prompt, end="", flush=True)
    collected: List[str] = []
    while True:
        try:
            raw = sys.stdin.readline()
        except KeyboardInterrupt:
            return "/exit"
        if not raw:
            break  # EOF closes the input
        if not raw.rstrip("\n").strip():
            break  # a blank line terminates the entry
        collected.append(raw)
        print(".. ", end="", flush=True)
    return "".join(collected).strip()
|
|
|
|
|
|
|
|
def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Build the CLI argument parser and evaluate *argv*.

    Environment variables ELIZABETH_BASE_URL / ELIZABETH_API_KEY supply the
    defaults for --base-url / --api-key.  The model itself is locked (see
    main()) and intentionally has no flag.
    """
    parser = argparse.ArgumentParser(
        description="Elizabeth interactive CLI with tools (vLLM /chat/completions client)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--base-url",
        default=os.environ.get("ELIZABETH_BASE_URL", DEFAULT_BASE_URL),
        help="Base URL to OpenAI-compatible API (e.g., http://localhost:8000/v1)",
    )
    parser.add_argument(
        "--api-key",
        default=os.environ.get("ELIZABETH_API_KEY", DEFAULT_API_KEY),
        help="Bearer API key",
    )
    parser.add_argument("--thinking", default="chain_of_thought", choices=list(PRESETS.keys()), help="Reasoning preset")
    parser.add_argument("--temperature", type=float, default=None, help="Override temperature")
    parser.add_argument("--top-p", type=float, default=None, help="Override top_p")
    parser.add_argument("--max-tokens", type=int, default=None, help="Override max_tokens")
    parser.add_argument("--frequency-penalty", type=float, default=None, help="Override frequency_penalty")
    parser.add_argument("--max-steps", type=int, default=6, help="Max inner tool-calling steps per turn")
    parser.add_argument("--system", default=None, help="Custom system prompt (replaces preset)")
    return parser.parse_args(argv)
|
|
|
|
|
|
|
|
def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point: load .env, parse flags, run the chat loop.

    Returns:
        Process exit code — 0 on clean exit, 130 on Ctrl-C, 2 on an HTTP
        error from the backend, 1 on any other failure.
    """
    _load_dotenv()
    args = parse_args(argv)
    config = LLMConfig(
        base_url=args.base_url,
        model=DEFAULT_MODEL,  # model is locked; the CLI cannot override it
        api_key=args.api_key,
        max_steps=args.max_steps,
        thinking=args.thinking,
        temperature=args.temperature,
        top_p=args.top_p,
        max_tokens=args.max_tokens,
        frequency_penalty=args.frequency_penalty,
        system_prompt=args.system,
    )
    try:
        run_chat(config)
        return 0
    except KeyboardInterrupt:
        print("\nInterrupted.")
        return 130
    except requests.HTTPError as e:
        print(f"HTTP error: {e} | Response: {getattr(e, 'response', None) and getattr(e.response, 'text', '')}")
        return 2
    except Exception as e:
        print(f"Error: {e}")
        return 1
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # sys.exit raises SystemExit with main()'s return code, same as the
    # explicit raise.
    sys.exit(main())
|
|
|