| import argparse | |
| import sys | |
| import json | |
| from .model_manager import OVModelManager | |
| # from .api_server import start_server | |
def main():
    """Entry point for the OpenVinayaka CLI.

    Parses ``sys.argv`` for a command (``run`` or ``serve``) plus options
    and dispatches to the matching branch.  The ``serve`` branch is
    currently stubbed out (its implementation is commented pending a
    fastapi/uvicorn dependency); the ``run`` branch prints a banner —
    the rest of its logic lives elsewhere in this file (elided here).
    """
    parser = argparse.ArgumentParser(description="OpenVinayaka: Hallucination-Free AI Runner")
    parser.add_argument("command", choices=["run", "serve"], help="Command to execute")
    parser.add_argument("--model", type=str, default="gpt2", help="HuggingFace model ID or Path to .gguf file")
    parser.add_argument("--memory", type=str, help="Path to JSON memory file (Truth Source)")
    parser.add_argument("--port", type=int, default=8000, help="Port for API Server")
    args = parser.parse_args()

    if args.command == "serve":
        # Server support is stubbed until fastapi/uvicorn become a dependency.
        print("Feature requires fastapi (pip install fastapi uvicorn)")
        # print(f"๐ Starting OpenVinayaka API Server on port {args.port}...")
        # print(f" OpenAI Compatible Endpoint: http://localhost:{args.port}/v1/chat/completions")
        # start_server(port=args.port, model=args.model)
    elif args.command == "run":
        # NOTE(review): "๐" looks like a mis-decoded emoji (mojibake) — confirm
        # intended glyph with the author before changing the output string.
        # Dropped the needless f-prefix: the literal has no placeholders.
        print("๐ OpenVinayaka CLI v1.0")
        # ... (rest of run logic remains same)
| if __name__ == "__main__": | |
| main() | |